1 /*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include <math.h>
21 #include "exec.h"
22 #include "exec-all.h"
23 #include "host-utils.h"
24 #include "ioport.h"
25
26 //#define DEBUG_PCALL
27
28
29 #ifdef DEBUG_PCALL
30 # define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
31 # define LOG_PCALL_STATE(env) \
32 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
33 #else
34 # define LOG_PCALL(...) do { } while (0)
35 # define LOG_PCALL_STATE(env) do { } while (0)
36 #endif
37
38
39 #if 0
40 #define raise_exception_err(a, b)\
41 do {\
42 qemu_log("raise_exception line=%d\n", __LINE__);\
43 (raise_exception_err)(a, b);\
44 } while (0)
45 #endif
46
47 static const uint8_t parity_table[256] = {
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
68 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
69 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
70 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
71 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
72 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
73 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
74 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
75 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
76 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
77 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
78 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
79 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
80 };
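/* Illustrative sketch (not compiled, hypothetical helper name): parity_table[i]
   holds CC_P exactly when the byte value i has an even number of set bits,
   which is how the x86 PF flag is defined; the flag helpers index it with the
   low 8 bits of a result. */
#if 0
static int parity_table_is_consistent(void)
{
    int i, v, bits;

    for (i = 0; i < 256; i++) {
        for (v = i, bits = 0; v != 0; v >>= 1)
            bits += v & 1;
        if (parity_table[i] != ((bits & 1) ? 0 : CC_P))
            return 0;
    }
    return 1;
}
#endif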
81
82 /* modulo 17 table */
83 static const uint8_t rclw_table[32] = {
84 0, 1, 2, 3, 4, 5, 6, 7,
85 8, 9,10,11,12,13,14,15,
86 16, 0, 1, 2, 3, 4, 5, 6,
87 7, 8, 9,10,11,12,13,14,
88 };
89
90 /* modulo 9 table */
91 static const uint8_t rclb_table[32] = {
92 0, 1, 2, 3, 4, 5, 6, 7,
93 8, 0, 1, 2, 3, 4, 5, 6,
94 7, 8, 0, 1, 2, 3, 4, 5,
95 6, 7, 8, 0, 1, 2, 3, 4,
96 };
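/* Illustrative sketch (not compiled, hypothetical helper name): RCL rotates
   through CF, so an 8-bit rotate works modulo 9 bits and a 16-bit rotate
   modulo 17 bits.  The RCL helpers mask the shift count to 5 bits and then
   use these tables to reduce it, i.e. rclb_table[c] == c % 9 and
   rclw_table[c] == c % 17 for c in [0, 31]. */
#if 0
static int rcl_tables_are_consistent(void)
{
    int c;

    for (c = 0; c < 32; c++) {
        if (rclb_table[c] != c % 9 || rclw_table[c] != c % 17)
            return 0;
    }
    return 1;
}
#endif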
97
98 static const CPU86_LDouble f15rk[7] =
99 {
100 0.00000000000000000000L,
101 1.00000000000000000000L,
102 3.14159265358979323851L, /*pi*/
103 0.30102999566398119523L, /*lg2*/
104 0.69314718055994530943L, /*ln2*/
105 1.44269504088896340739L, /*l2e*/
106 3.32192809488736234781L, /*l2t*/
107 };
108
109 /* broken thread support */
110
111 static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
112
113 void helper_lock(void)
114 {
115 spin_lock(&global_cpu_lock);
116 }
117
118 void helper_unlock(void)
119 {
120 spin_unlock(&global_cpu_lock);
121 }
122
123 void helper_write_eflags(target_ulong t0, uint32_t update_mask)
124 {
125 load_eflags(t0, update_mask);
126 }
127
128 target_ulong helper_read_eflags(void)
129 {
130 uint32_t eflags;
131 eflags = helper_cc_compute_all(CC_OP);
132 eflags |= (DF & DF_MASK);
133 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
134 return eflags;
135 }
136
137 /* return non-zero on error */
138 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
139 int selector)
140 {
141 SegmentCache *dt;
142 int index;
143 target_ulong ptr;
144
145 if (selector & 0x4)
146 dt = &env->ldt;
147 else
148 dt = &env->gdt;
149 index = selector & ~7;
150 if ((index + 7) > dt->limit)
151 return -1;
152 ptr = dt->base + index;
153 *e1_ptr = ldl_kernel(ptr);
154 *e2_ptr = ldl_kernel(ptr + 4);
155 return 0;
156 }
157
158 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
159 {
160 unsigned int limit;
161 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
162 if (e2 & DESC_G_MASK)
163 limit = (limit << 12) | 0xfff;
164 return limit;
165 }
166
167 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
168 {
169 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
170 }
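/* Worked example (sketch only, not compiled): for the classic flat 4 GiB code
   descriptor e1 = 0x0000ffff, e2 = 0x00cf9a00, get_seg_base() returns 0
   ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000)) and get_seg_limit()
   returns 0xffffffff, because DESC_G_MASK is set so the 20-bit limit 0xfffff
   is expanded to 4 KiB granularity ((0xfffff << 12) | 0xfff). */
#if 0
static void decode_flat_code_descriptor(void)
{
    uint32_t e1 = 0x0000ffff, e2 = 0x00cf9a00;

    uint32_t base  = get_seg_base(e1, e2);   /* == 0x00000000 */
    uint32_t limit = get_seg_limit(e1, e2);  /* == 0xffffffff */
    (void)base;
    (void)limit;
}
#endif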
171
172 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
173 {
174 sc->base = get_seg_base(e1, e2);
175 sc->limit = get_seg_limit(e1, e2);
176 sc->flags = e2;
177 }
178
179 /* init the segment cache in vm86 mode. */
180 static inline void load_seg_vm(int seg, int selector)
181 {
182 selector &= 0xffff;
183 cpu_x86_load_seg_cache(env, seg, selector,
184 (selector << 4), 0xffff, 0);
185 }
186
187 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
188 uint32_t *esp_ptr, int dpl)
189 {
190 int type, index, shift;
191
192 #if 0
193 {
194 int i;
195 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
196 for(i=0;i<env->tr.limit;i++) {
197 printf("%02x ", env->tr.base[i]);
198 if ((i & 7) == 7) printf("\n");
199 }
200 printf("\n");
201 }
202 #endif
203
204 if (!(env->tr.flags & DESC_P_MASK))
205 cpu_abort(env, "invalid tss");
206 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
207 if ((type & 7) != 1)
208 cpu_abort(env, "invalid tss type");
209 shift = type >> 3;
210 index = (dpl * 4 + 2) << shift;
211 if (index + (4 << shift) - 1 > env->tr.limit)
212 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
213 if (shift == 0) {
214 *esp_ptr = lduw_kernel(env->tr.base + index);
215 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
216 } else {
217 *esp_ptr = ldl_kernel(env->tr.base + index);
218 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
219 }
220 }
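/* Worked example (sketch only, not compiled): with a 32-bit TSS (shift == 1)
   and dpl == 0, the index computed above is (0 * 4 + 2) << 1 == 4, so ESP0 is
   read from tr.base + 4 and SS0 from tr.base + 8, matching the architected
   32-bit TSS layout; a 16-bit TSS (shift == 0) reads SP0 at tr.base + 2 and
   SS0 at tr.base + 4. */
#if 0
static void example_ring0_stack_lookup(void)
{
    uint32_t ss0, esp0;

    get_ss_esp_from_tss(&ss0, &esp0, 0);   /* tr.base + 4 / + 8 for a 32-bit TSS */
    (void)ss0;
    (void)esp0;
}
#endif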
221
222 /* XXX: merge with load_seg() */
223 static void tss_load_seg(int seg_reg, int selector)
224 {
225 uint32_t e1, e2;
226 int rpl, dpl, cpl;
227
228 if ((selector & 0xfffc) != 0) {
229 if (load_segment(&e1, &e2, selector) != 0)
230 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
231 if (!(e2 & DESC_S_MASK))
232 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
233 rpl = selector & 3;
234 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
235 cpl = env->hflags & HF_CPL_MASK;
236 if (seg_reg == R_CS) {
237 if (!(e2 & DESC_CS_MASK))
238 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
239 /* XXX: is it correct ? */
240 if (dpl != rpl)
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
242 if ((e2 & DESC_C_MASK) && dpl > rpl)
243 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
244 } else if (seg_reg == R_SS) {
245 /* SS must be writable data */
246 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
247 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
248 if (dpl != cpl || dpl != rpl)
249 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
250 } else {
251 /* not readable code */
252 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
253 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
254 /* if data or non-conforming code, check the access rights */
255 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
256 if (dpl < cpl || dpl < rpl)
257 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
258 }
259 }
260 if (!(e2 & DESC_P_MASK))
261 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
262 cpu_x86_load_seg_cache(env, seg_reg, selector,
263 get_seg_base(e1, e2),
264 get_seg_limit(e1, e2),
265 e2);
266 } else {
267 if (seg_reg == R_SS || seg_reg == R_CS)
268 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
269 }
270 }
271
272 #define SWITCH_TSS_JMP 0
273 #define SWITCH_TSS_IRET 1
274 #define SWITCH_TSS_CALL 2
275
276 /* XXX: restore CPU state in registers (PowerPC case) */
277 static void switch_tss(int tss_selector,
278 uint32_t e1, uint32_t e2, int source,
279 uint32_t next_eip)
280 {
281 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
282 target_ulong tss_base;
283 uint32_t new_regs[8], new_segs[6];
284 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
285 uint32_t old_eflags, eflags_mask;
286 SegmentCache *dt;
287 int index;
288 target_ulong ptr;
289
290 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
291 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
292
293 /* if it is a task gate, read and load the referenced TSS segment */
294 if (type == 5) {
295 if (!(e2 & DESC_P_MASK))
296 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
297 tss_selector = e1 >> 16;
298 if (tss_selector & 4)
299 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
300 if (load_segment(&e1, &e2, tss_selector) != 0)
301 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
302 if (e2 & DESC_S_MASK)
303 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
304 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
305 if ((type & 7) != 1)
306 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
307 }
308
309 if (!(e2 & DESC_P_MASK))
310 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
311
312 if (type & 8)
313 tss_limit_max = 103;
314 else
315 tss_limit_max = 43;
316 tss_limit = get_seg_limit(e1, e2);
317 tss_base = get_seg_base(e1, e2);
318 if ((tss_selector & 4) != 0 ||
319 tss_limit < tss_limit_max)
320 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
321 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
322 if (old_type & 8)
323 old_tss_limit_max = 103;
324 else
325 old_tss_limit_max = 43;
326
327 /* read all the registers from the new TSS */
328 if (type & 8) {
329 /* 32 bit */
330 new_cr3 = ldl_kernel(tss_base + 0x1c);
331 new_eip = ldl_kernel(tss_base + 0x20);
332 new_eflags = ldl_kernel(tss_base + 0x24);
333 for(i = 0; i < 8; i++)
334 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
335 for(i = 0; i < 6; i++)
336 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
337 new_ldt = lduw_kernel(tss_base + 0x60);
338 new_trap = ldl_kernel(tss_base + 0x64);
339 } else {
340 /* 16 bit */
341 new_cr3 = 0;
342 new_eip = lduw_kernel(tss_base + 0x0e);
343 new_eflags = lduw_kernel(tss_base + 0x10);
344 for(i = 0; i < 8; i++)
345 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
346 for(i = 0; i < 4; i++)
347 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
348 new_ldt = lduw_kernel(tss_base + 0x2a);
349 new_segs[R_FS] = 0;
350 new_segs[R_GS] = 0;
351 new_trap = 0;
352 }
353 /* XXX: avoid a compiler warning, see
354 http://support.amd.com/us/Processor_TechDocs/24593.pdf
355 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
356 (void)new_trap;
357
358 /* NOTE: we must avoid memory exceptions during the task switch,
359 so we perform dummy accesses beforehand */
360 /* XXX: it can still fail in some cases, so a bigger hack is
361 necessary to validate the TLB after the accesses have been done */
362
363 v1 = ldub_kernel(env->tr.base);
364 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
365 stb_kernel(env->tr.base, v1);
366 stb_kernel(env->tr.base + old_tss_limit_max, v2);
367
368 /* clear busy bit (it is restartable) */
369 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
370 target_ulong ptr;
371 uint32_t e2;
372 ptr = env->gdt.base + (env->tr.selector & ~7);
373 e2 = ldl_kernel(ptr + 4);
374 e2 &= ~DESC_TSS_BUSY_MASK;
375 stl_kernel(ptr + 4, e2);
376 }
377 old_eflags = compute_eflags();
378 if (source == SWITCH_TSS_IRET)
379 old_eflags &= ~NT_MASK;
380
381 /* save the current state in the old TSS */
382 if (type & 8) {
383 /* 32 bit */
384 stl_kernel(env->tr.base + 0x20, next_eip);
385 stl_kernel(env->tr.base + 0x24, old_eflags);
386 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
387 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
388 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
389 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
390 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
391 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
392 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
393 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
394 for(i = 0; i < 6; i++)
395 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
396 } else {
397 /* 16 bit */
398 stw_kernel(env->tr.base + 0x0e, next_eip);
399 stw_kernel(env->tr.base + 0x10, old_eflags);
400 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
401 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
402 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
403 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
404 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
405 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
406 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
407 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
408 for(i = 0; i < 4; i++)
409 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
410 }
411
412 /* from now on, if an exception occurs, it will occur in the new task's
413 context */
414
415 if (source == SWITCH_TSS_CALL) {
416 stw_kernel(tss_base, env->tr.selector);
417 new_eflags |= NT_MASK;
418 }
419
420 /* set busy bit */
421 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
422 target_ulong ptr;
423 uint32_t e2;
424 ptr = env->gdt.base + (tss_selector & ~7);
425 e2 = ldl_kernel(ptr + 4);
426 e2 |= DESC_TSS_BUSY_MASK;
427 stl_kernel(ptr + 4, e2);
428 }
429
430 /* set the new CPU state */
431 /* from this point on, any exception that occurs can cause problems */
432 env->cr[0] |= CR0_TS_MASK;
433 env->hflags |= HF_TS_MASK;
434 env->tr.selector = tss_selector;
435 env->tr.base = tss_base;
436 env->tr.limit = tss_limit;
437 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
438
439 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
440 cpu_x86_update_cr3(env, new_cr3);
441 }
442
443 /* first load the registers that cannot raise an exception, then reload
444 the ones that may fault */
445 env->eip = new_eip;
446 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
447 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
448 if (!(type & 8))
449 eflags_mask &= 0xffff;
450 load_eflags(new_eflags, eflags_mask);
451 /* XXX: what to do in 16 bit case ? */
452 EAX = new_regs[0];
453 ECX = new_regs[1];
454 EDX = new_regs[2];
455 EBX = new_regs[3];
456 ESP = new_regs[4];
457 EBP = new_regs[5];
458 ESI = new_regs[6];
459 EDI = new_regs[7];
460 if (new_eflags & VM_MASK) {
461 for(i = 0; i < 6; i++)
462 load_seg_vm(i, new_segs[i]);
463 /* in vm86, CPL is always 3 */
464 cpu_x86_set_cpl(env, 3);
465 } else {
466 /* CPL is set to the RPL of CS */
467 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
468 /* load just the selectors first, as the rest may trigger exceptions */
469 for(i = 0; i < 6; i++)
470 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
471 }
472
473 env->ldt.selector = new_ldt & ~4;
474 env->ldt.base = 0;
475 env->ldt.limit = 0;
476 env->ldt.flags = 0;
477
478 /* load the LDT */
479 if (new_ldt & 4)
480 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
481
482 if ((new_ldt & 0xfffc) != 0) {
483 dt = &env->gdt;
484 index = new_ldt & ~7;
485 if ((index + 7) > dt->limit)
486 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
487 ptr = dt->base + index;
488 e1 = ldl_kernel(ptr);
489 e2 = ldl_kernel(ptr + 4);
490 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
491 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
492 if (!(e2 & DESC_P_MASK))
493 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
494 load_seg_cache_raw_dt(&env->ldt, e1, e2);
495 }
496
497 /* load the segments */
498 if (!(new_eflags & VM_MASK)) {
499 tss_load_seg(R_CS, new_segs[R_CS]);
500 tss_load_seg(R_SS, new_segs[R_SS]);
501 tss_load_seg(R_ES, new_segs[R_ES]);
502 tss_load_seg(R_DS, new_segs[R_DS]);
503 tss_load_seg(R_FS, new_segs[R_FS]);
504 tss_load_seg(R_GS, new_segs[R_GS]);
505 }
506
507 /* check that EIP is in the CS segment limits */
508 if (new_eip > env->segs[R_CS].limit) {
509 /* XXX: different exception if CALL ? */
510 raise_exception_err(EXCP0D_GPF, 0);
511 }
512
513 #ifndef CONFIG_USER_ONLY
514 /* reset local breakpoints */
515 if (env->dr[7] & 0x55) {
516 for (i = 0; i < 4; i++) {
517 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
518 hw_breakpoint_remove(env, i);
519 }
520 env->dr[7] &= ~0x55;
521 }
522 #endif
523 }
524
525 /* check if Port I/O is allowed in TSS */
526 static inline void check_io(int addr, int size)
527 {
528 int io_offset, val, mask;
529
530 /* TSS must be a valid 32 bit one */
531 if (!(env->tr.flags & DESC_P_MASK) ||
532 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
533 env->tr.limit < 103)
534 goto fail;
535 io_offset = lduw_kernel(env->tr.base + 0x66);
536 io_offset += (addr >> 3);
537 /* Note: the check needs two bytes */
538 if ((io_offset + 1) > env->tr.limit)
539 goto fail;
540 val = lduw_kernel(env->tr.base + io_offset);
541 val >>= (addr & 7);
542 mask = (1 << size) - 1;
543 /* all bits must be zero to allow the I/O */
544 if ((val & mask) != 0) {
545 fail:
546 raise_exception_err(EXCP0D_GPF, 0);
547 }
548 }
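/* Worked example (sketch only, not compiled): for check_io(0x3f8, 1) the I/O
   bitmap base is read from the 16-bit field at tr.base + 0x66, 0x3f8 >> 3 ==
   0x7f is added to it, and the word loaded there is shifted right by
   0x3f8 & 7 == 0.  The access is allowed only if the low "size" bits are all
   clear; otherwise #GP(0) is raised. */
#if 0
static void example_serial_port_check(void)
{
    check_io(0x3f8, 1);   /* raises EXCP0D_GPF if the port's bitmap bit is set */
}
#endif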
549
550 void helper_check_iob(uint32_t t0)
551 {
552 check_io(t0, 1);
553 }
554
555 void helper_check_iow(uint32_t t0)
556 {
557 check_io(t0, 2);
558 }
559
560 void helper_check_iol(uint32_t t0)
561 {
562 check_io(t0, 4);
563 }
564
565 void helper_outb(uint32_t port, uint32_t data)
566 {
567 cpu_outb(port, data & 0xff);
568 }
569
570 target_ulong helper_inb(uint32_t port)
571 {
572 return cpu_inb(port);
573 }
574
575 void helper_outw(uint32_t port, uint32_t data)
576 {
577 cpu_outw(port, data & 0xffff);
578 }
579
580 target_ulong helper_inw(uint32_t port)
581 {
582 return cpu_inw(port);
583 }
584
585 void helper_outl(uint32_t port, uint32_t data)
586 {
587 cpu_outl(port, data);
588 }
589
590 target_ulong helper_inl(uint32_t port)
591 {
592 return cpu_inl(port);
593 }
594
595 static inline unsigned int get_sp_mask(unsigned int e2)
596 {
597 if (e2 & DESC_B_MASK)
598 return 0xffffffff;
599 else
600 return 0xffff;
601 }
602
603 static int exeption_has_error_code(int intno)
604 {
605 switch(intno) {
606 case 8:
607 case 10:
608 case 11:
609 case 12:
610 case 13:
611 case 14:
612 case 17:
613 return 1;
614 }
615 return 0;
616 }
617
618 #ifdef TARGET_X86_64
619 #define SET_ESP(val, sp_mask)\
620 do {\
621 if ((sp_mask) == 0xffff)\
622 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
623 else if ((sp_mask) == 0xffffffffLL)\
624 ESP = (uint32_t)(val);\
625 else\
626 ESP = (val);\
627 } while (0)
628 #else
629 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
630 #endif
631
632 /* in 64-bit machines, this can overflow. So this segment addition macro
633 * can be used to trim the value to 32-bit whenever needed */
634 #define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
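/* Worked example (sketch only, not compiled): with ssp == 0xfffff000 and a
   32-bit stack (sp_mask == 0xffffffff), sp == 0x2000 gives ssp + sp ==
   0x100001000, which no longer fits in 32 bits when target_ulong is 64 bits
   wide.  SEG_ADDL() truncates the sum back to 0x00001000, so the linear
   address wraps at 4 GiB as it should for a 32-bit stack segment. */
#if 0
static void example_seg_addl_wrap(void)
{
    target_ulong ssp = 0xfffff000;
    target_ulong sp = 0x2000;
    target_ulong addr = SEG_ADDL(ssp, sp, 0xffffffff);   /* == 0x00001000 */
    (void)addr;
}
#endif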
635
636 /* XXX: add an is_user flag to have proper security support */
637 #define PUSHW(ssp, sp, sp_mask, val)\
638 {\
639 sp -= 2;\
640 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
641 }
642
643 #define PUSHL(ssp, sp, sp_mask, val)\
644 {\
645 sp -= 4;\
646 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
647 }
648
649 #define POPW(ssp, sp, sp_mask, val)\
650 {\
651 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
652 sp += 2;\
653 }
654
655 #define POPL(ssp, sp, sp_mask, val)\
656 {\
657 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
658 sp += 4;\
659 }
660
661 /* protected mode interrupt */
662 static void do_interrupt_protected(int intno, int is_int, int error_code,
663 unsigned int next_eip, int is_hw)
664 {
665 SegmentCache *dt;
666 target_ulong ptr, ssp;
667 int type, dpl, selector, ss_dpl, cpl;
668 int has_error_code, new_stack, shift;
669 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
670 uint32_t old_eip, sp_mask;
671
672 has_error_code = 0;
673 if (!is_int && !is_hw)
674 has_error_code = exeption_has_error_code(intno);
675 if (is_int)
676 old_eip = next_eip;
677 else
678 old_eip = env->eip;
679
680 dt = &env->idt;
681 if (intno * 8 + 7 > dt->limit)
682 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
683 ptr = dt->base + intno * 8;
684 e1 = ldl_kernel(ptr);
685 e2 = ldl_kernel(ptr + 4);
686 /* check gate type */
687 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
688 switch(type) {
689 case 5: /* task gate */
690 /* must do that check here to return the correct error code */
691 if (!(e2 & DESC_P_MASK))
692 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
693 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
694 if (has_error_code) {
695 int type;
696 uint32_t mask;
697 /* push the error code */
698 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
699 shift = type >> 3;
700 if (env->segs[R_SS].flags & DESC_B_MASK)
701 mask = 0xffffffff;
702 else
703 mask = 0xffff;
704 esp = (ESP - (2 << shift)) & mask;
705 ssp = env->segs[R_SS].base + esp;
706 if (shift)
707 stl_kernel(ssp, error_code);
708 else
709 stw_kernel(ssp, error_code);
710 SET_ESP(esp, mask);
711 }
712 return;
713 case 6: /* 286 interrupt gate */
714 case 7: /* 286 trap gate */
715 case 14: /* 386 interrupt gate */
716 case 15: /* 386 trap gate */
717 break;
718 default:
719 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
720 break;
721 }
722 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
723 cpl = env->hflags & HF_CPL_MASK;
724 /* check privilege if software int */
725 if (is_int && dpl < cpl)
726 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
727 /* check valid bit */
728 if (!(e2 & DESC_P_MASK))
729 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
730 selector = e1 >> 16;
731 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
732 if ((selector & 0xfffc) == 0)
733 raise_exception_err(EXCP0D_GPF, 0);
734
735 if (load_segment(&e1, &e2, selector) != 0)
736 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
737 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
738 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
739 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
740 if (dpl > cpl)
741 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
742 if (!(e2 & DESC_P_MASK))
743 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
744 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
745 /* to inner privilege */
746 get_ss_esp_from_tss(&ss, &esp, dpl);
747 if ((ss & 0xfffc) == 0)
748 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
749 if ((ss & 3) != dpl)
750 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
751 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
752 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
753 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
754 if (ss_dpl != dpl)
755 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
756 if (!(ss_e2 & DESC_S_MASK) ||
757 (ss_e2 & DESC_CS_MASK) ||
758 !(ss_e2 & DESC_W_MASK))
759 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
760 if (!(ss_e2 & DESC_P_MASK))
761 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
762 new_stack = 1;
763 sp_mask = get_sp_mask(ss_e2);
764 ssp = get_seg_base(ss_e1, ss_e2);
765 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
766 /* to same privilege */
767 if (env->eflags & VM_MASK)
768 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
769 new_stack = 0;
770 sp_mask = get_sp_mask(env->segs[R_SS].flags);
771 ssp = env->segs[R_SS].base;
772 esp = ESP;
773 dpl = cpl;
774 } else {
775 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
776 new_stack = 0; /* avoid warning */
777 sp_mask = 0; /* avoid warning */
778 ssp = 0; /* avoid warning */
779 esp = 0; /* avoid warning */
780 }
781
782 shift = type >> 3;
783
784 #if 0
785 /* XXX: check that enough room is available */
786 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
787 if (env->eflags & VM_MASK)
788 push_size += 8;
789 push_size <<= shift;
790 #endif
791 if (shift == 1) {
792 if (new_stack) {
793 if (env->eflags & VM_MASK) {
794 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
795 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
796 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
797 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
798 }
799 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
800 PUSHL(ssp, esp, sp_mask, ESP);
801 }
802 PUSHL(ssp, esp, sp_mask, compute_eflags());
803 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
804 PUSHL(ssp, esp, sp_mask, old_eip);
805 if (has_error_code) {
806 PUSHL(ssp, esp, sp_mask, error_code);
807 }
808 } else {
809 if (new_stack) {
810 if (env->eflags & VM_MASK) {
811 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
812 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
813 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
814 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
815 }
816 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
817 PUSHW(ssp, esp, sp_mask, ESP);
818 }
819 PUSHW(ssp, esp, sp_mask, compute_eflags());
820 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
821 PUSHW(ssp, esp, sp_mask, old_eip);
822 if (has_error_code) {
823 PUSHW(ssp, esp, sp_mask, error_code);
824 }
825 }
826
827 if (new_stack) {
828 if (env->eflags & VM_MASK) {
829 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
830 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
831 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
832 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
833 }
834 ss = (ss & ~3) | dpl;
835 cpu_x86_load_seg_cache(env, R_SS, ss,
836 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
837 }
838 SET_ESP(esp, sp_mask);
839
840 selector = (selector & ~3) | dpl;
841 cpu_x86_load_seg_cache(env, R_CS, selector,
842 get_seg_base(e1, e2),
843 get_seg_limit(e1, e2),
844 e2);
845 cpu_x86_set_cpl(env, dpl);
846 env->eip = offset;
847
848 /* interrupt gates clear the IF flag */
849 if ((type & 1) == 0) {
850 env->eflags &= ~IF_MASK;
851 }
852 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
853 }
854
855 #ifdef TARGET_X86_64
856
857 #define PUSHQ(sp, val)\
858 {\
859 sp -= 8;\
860 stq_kernel(sp, (val));\
861 }
862
863 #define POPQ(sp, val)\
864 {\
865 val = ldq_kernel(sp);\
866 sp += 8;\
867 }
868
869 static inline target_ulong get_rsp_from_tss(int level)
870 {
871 int index;
872
873 #if 0
874 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
875 env->tr.base, env->tr.limit);
876 #endif
877
878 if (!(env->tr.flags & DESC_P_MASK))
879 cpu_abort(env, "invalid tss");
880 index = 8 * level + 4;
881 if ((index + 7) > env->tr.limit)
882 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
883 return ldq_kernel(env->tr.base + index);
884 }
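/* Worked example (sketch only, not compiled): index == 8 * level + 4, so
   level 0 reads RSP0 at tr.base + 4.  do_interrupt64() below passes ist + 3
   for IST stacks, so IST1 (ist == 1) becomes level 4 and is read from
   tr.base + 0x24, matching the 64-bit TSS layout. */
#if 0
static void example_ist1_lookup(void)
{
    target_ulong rsp0 = get_rsp_from_tss(0);       /* RSP0 at tr.base + 0x04 */
    target_ulong ist1 = get_rsp_from_tss(1 + 3);   /* IST1 at tr.base + 0x24 */
    (void)rsp0;
    (void)ist1;
}
#endif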
885
886 /* 64 bit interrupt */
887 static void do_interrupt64(int intno, int is_int, int error_code,
888 target_ulong next_eip, int is_hw)
889 {
890 SegmentCache *dt;
891 target_ulong ptr;
892 int type, dpl, selector, cpl, ist;
893 int has_error_code, new_stack;
894 uint32_t e1, e2, e3, ss;
895 target_ulong old_eip, esp, offset;
896
897 has_error_code = 0;
898 if (!is_int && !is_hw)
899 has_error_code = exeption_has_error_code(intno);
900 if (is_int)
901 old_eip = next_eip;
902 else
903 old_eip = env->eip;
904
905 dt = &env->idt;
906 if (intno * 16 + 15 > dt->limit)
907 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
908 ptr = dt->base + intno * 16;
909 e1 = ldl_kernel(ptr);
910 e2 = ldl_kernel(ptr + 4);
911 e3 = ldl_kernel(ptr + 8);
912 /* check gate type */
913 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
914 switch(type) {
915 case 14: /* 386 interrupt gate */
916 case 15: /* 386 trap gate */
917 break;
918 default:
919 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
920 break;
921 }
922 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
923 cpl = env->hflags & HF_CPL_MASK;
924 /* check privilege if software int */
925 if (is_int && dpl < cpl)
926 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
927 /* check valid bit */
928 if (!(e2 & DESC_P_MASK))
929 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
930 selector = e1 >> 16;
931 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
932 ist = e2 & 7;
933 if ((selector & 0xfffc) == 0)
934 raise_exception_err(EXCP0D_GPF, 0);
935
936 if (load_segment(&e1, &e2, selector) != 0)
937 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
938 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
939 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
940 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
941 if (dpl > cpl)
942 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
943 if (!(e2 & DESC_P_MASK))
944 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
945 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
946 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
947 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
948 /* to inner privilege */
949 if (ist != 0)
950 esp = get_rsp_from_tss(ist + 3);
951 else
952 esp = get_rsp_from_tss(dpl);
953 esp &= ~0xfLL; /* align stack */
954 ss = 0;
955 new_stack = 1;
956 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
957 /* to same privilege */
958 if (env->eflags & VM_MASK)
959 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
960 new_stack = 0;
961 if (ist != 0)
962 esp = get_rsp_from_tss(ist + 3);
963 else
964 esp = ESP;
965 esp &= ~0xfLL; /* align stack */
966 dpl = cpl;
967 } else {
968 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
969 new_stack = 0; /* avoid warning */
970 esp = 0; /* avoid warning */
971 }
972
973 PUSHQ(esp, env->segs[R_SS].selector);
974 PUSHQ(esp, ESP);
975 PUSHQ(esp, compute_eflags());
976 PUSHQ(esp, env->segs[R_CS].selector);
977 PUSHQ(esp, old_eip);
978 if (has_error_code) {
979 PUSHQ(esp, error_code);
980 }
981
982 if (new_stack) {
983 ss = 0 | dpl;
984 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
985 }
986 ESP = esp;
987
988 selector = (selector & ~3) | dpl;
989 cpu_x86_load_seg_cache(env, R_CS, selector,
990 get_seg_base(e1, e2),
991 get_seg_limit(e1, e2),
992 e2);
993 cpu_x86_set_cpl(env, dpl);
994 env->eip = offset;
995
996 /* interrupt gates clear the IF flag */
997 if ((type & 1) == 0) {
998 env->eflags &= ~IF_MASK;
999 }
1000 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1001 }
1002 #endif
1003
1004 #ifdef TARGET_X86_64
1005 #if defined(CONFIG_USER_ONLY)
1006 void helper_syscall(int next_eip_addend)
1007 {
1008 env->exception_index = EXCP_SYSCALL;
1009 env->exception_next_eip = env->eip + next_eip_addend;
1010 cpu_loop_exit();
1011 }
1012 #else
1013 void helper_syscall(int next_eip_addend)
1014 {
1015 int selector;
1016
1017 if (!(env->efer & MSR_EFER_SCE)) {
1018 raise_exception_err(EXCP06_ILLOP, 0);
1019 }
1020 selector = (env->star >> 32) & 0xffff;
1021 if (env->hflags & HF_LMA_MASK) {
1022 int code64;
1023
1024 ECX = env->eip + next_eip_addend;
1025 env->regs[11] = compute_eflags();
1026
1027 code64 = env->hflags & HF_CS64_MASK;
1028
1029 cpu_x86_set_cpl(env, 0);
1030 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1031 0, 0xffffffff,
1032 DESC_G_MASK | DESC_P_MASK |
1033 DESC_S_MASK |
1034 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1035 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1036 0, 0xffffffff,
1037 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1038 DESC_S_MASK |
1039 DESC_W_MASK | DESC_A_MASK);
1040 env->eflags &= ~env->fmask;
1041 load_eflags(env->eflags, 0);
1042 if (code64)
1043 env->eip = env->lstar;
1044 else
1045 env->eip = env->cstar;
1046 } else {
1047 ECX = (uint32_t)(env->eip + next_eip_addend);
1048
1049 cpu_x86_set_cpl(env, 0);
1050 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1051 0, 0xffffffff,
1052 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1053 DESC_S_MASK |
1054 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1055 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1056 0, 0xffffffff,
1057 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1058 DESC_S_MASK |
1059 DESC_W_MASK | DESC_A_MASK);
1060 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1061 env->eip = (uint32_t)env->star;
1062 }
1063 }
1064 #endif
1065 #endif
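/* Illustrative sketch (not compiled, example values only): SYSCALL and SYSRET
   derive their selectors from the STAR MSR.  Bits 47:32 give the SYSCALL CS
   (with SS loaded from CS + 8), and bits 63:48 give the SYSRET base selector
   (CS = base + 16 for a 64-bit SYSRET, SS = base + 8), which is what
   helper_syscall() above and helper_sysret() below extract. */
#if 0
static void example_star_layout(void)
{
    uint64_t star = ((uint64_t)0x0023 << 48) | ((uint64_t)0x0008 << 32);

    int syscall_cs  = (star >> 32) & 0xffff;   /* 0x0008; SS becomes 0x0010 */
    int sysret_base = (star >> 48) & 0xffff;   /* 0x0023 */
    (void)syscall_cs;
    (void)sysret_base;
}
#endif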
1066
1067 #ifdef TARGET_X86_64
1068 void helper_sysret(int dflag)
1069 {
1070 int cpl, selector;
1071
1072 if (!(env->efer & MSR_EFER_SCE)) {
1073 raise_exception_err(EXCP06_ILLOP, 0);
1074 }
1075 cpl = env->hflags & HF_CPL_MASK;
1076 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1077 raise_exception_err(EXCP0D_GPF, 0);
1078 }
1079 selector = (env->star >> 48) & 0xffff;
1080 if (env->hflags & HF_LMA_MASK) {
1081 if (dflag == 2) {
1082 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1083 0, 0xffffffff,
1084 DESC_G_MASK | DESC_P_MASK |
1085 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1086 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1087 DESC_L_MASK);
1088 env->eip = ECX;
1089 } else {
1090 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1091 0, 0xffffffff,
1092 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1093 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1094 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1095 env->eip = (uint32_t)ECX;
1096 }
1097 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1098 0, 0xffffffff,
1099 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1100 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1101 DESC_W_MASK | DESC_A_MASK);
1102 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1103 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1104 cpu_x86_set_cpl(env, 3);
1105 } else {
1106 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1107 0, 0xffffffff,
1108 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1109 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1110 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1111 env->eip = (uint32_t)ECX;
1112 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1113 0, 0xffffffff,
1114 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1115 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1116 DESC_W_MASK | DESC_A_MASK);
1117 env->eflags |= IF_MASK;
1118 cpu_x86_set_cpl(env, 3);
1119 }
1120 }
1121 #endif
1122
1123 /* real mode interrupt */
1124 static void do_interrupt_real(int intno, int is_int, int error_code,
1125 unsigned int next_eip)
1126 {
1127 SegmentCache *dt;
1128 target_ulong ptr, ssp;
1129 int selector;
1130 uint32_t offset, esp;
1131 uint32_t old_cs, old_eip;
1132
1133 /* real mode (simpler !) */
1134 dt = &env->idt;
1135 if (intno * 4 + 3 > dt->limit)
1136 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1137 ptr = dt->base + intno * 4;
1138 offset = lduw_kernel(ptr);
1139 selector = lduw_kernel(ptr + 2);
1140 esp = ESP;
1141 ssp = env->segs[R_SS].base;
1142 if (is_int)
1143 old_eip = next_eip;
1144 else
1145 old_eip = env->eip;
1146 old_cs = env->segs[R_CS].selector;
1147 /* XXX: use SS segment size ? */
1148 PUSHW(ssp, esp, 0xffff, compute_eflags());
1149 PUSHW(ssp, esp, 0xffff, old_cs);
1150 PUSHW(ssp, esp, 0xffff, old_eip);
1151
1152 /* update processor state */
1153 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1154 env->eip = offset;
1155 env->segs[R_CS].selector = selector;
1156 env->segs[R_CS].base = (selector << 4);
1157 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1158 }
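/* Worked example (sketch only, not compiled): in real mode the IDT is the
   4-byte-per-entry interrupt vector table, usually at linear address 0.  For
   intno == 0x10 the handler offset is read from idt.base + 0x40 and the
   handler segment from idt.base + 0x42, then FLAGS, CS and IP are pushed on
   the 16-bit stack before jumping there. */
#if 0
static void example_int10_vector(void)
{
    target_ulong ptr = env->idt.base + 0x10 * 4;

    uint32_t offset   = lduw_kernel(ptr);       /* handler IP */
    uint32_t selector = lduw_kernel(ptr + 2);   /* handler CS */
    (void)offset;
    (void)selector;
}
#endif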
1159
1160 /* fake user mode interrupt */
1161 void do_interrupt_user(int intno, int is_int, int error_code,
1162 target_ulong next_eip)
1163 {
1164 SegmentCache *dt;
1165 target_ulong ptr;
1166 int dpl, cpl, shift;
1167 uint32_t e2;
1168
1169 dt = &env->idt;
1170 if (env->hflags & HF_LMA_MASK) {
1171 shift = 4;
1172 } else {
1173 shift = 3;
1174 }
1175 ptr = dt->base + (intno << shift);
1176 e2 = ldl_kernel(ptr + 4);
1177
1178 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1179 cpl = env->hflags & HF_CPL_MASK;
1180 /* check privilege if software int */
1181 if (is_int && dpl < cpl)
1182 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1183
1184 /* Since we emulate only user space, we cannot do more than exit
1185 the emulation with the appropriate exception and error
1186 code */
1187 if (is_int)
1188 EIP = next_eip;
1189 }
1190
1191 #if !defined(CONFIG_USER_ONLY)
1192 static void handle_even_inj(int intno, int is_int, int error_code,
1193 int is_hw, int rm)
1194 {
1195 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1196 if (!(event_inj & SVM_EVTINJ_VALID)) {
1197 int type;
1198 if (is_int)
1199 type = SVM_EVTINJ_TYPE_SOFT;
1200 else
1201 type = SVM_EVTINJ_TYPE_EXEPT;
1202 event_inj = intno | type | SVM_EVTINJ_VALID;
1203 if (!rm && exeption_has_error_code(intno)) {
1204 event_inj |= SVM_EVTINJ_VALID_ERR;
1205 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1206 }
1207 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1208 }
1209 }
1210 #endif
1211
1212 /*
1213 * Begin execution of an interrupt. is_int is TRUE if coming from
1214 * the int instruction. next_eip is the EIP value AFTER the interrupt
1215 * instruction. It is only relevant if is_int is TRUE.
1216 */
1217 void do_interrupt(int intno, int is_int, int error_code,
1218 target_ulong next_eip, int is_hw)
1219 {
1220 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1221 if ((env->cr[0] & CR0_PE_MASK)) {
1222 static int count;
1223 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1224 count, intno, error_code, is_int,
1225 env->hflags & HF_CPL_MASK,
1226 env->segs[R_CS].selector, EIP,
1227 (int)env->segs[R_CS].base + EIP,
1228 env->segs[R_SS].selector, ESP);
1229 if (intno == 0x0e) {
1230 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
1231 } else {
1232 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
1233 }
1234 qemu_log("\n");
1235 log_cpu_state(env, X86_DUMP_CCOP);
1236 #if 0
1237 {
1238 int i;
1239 target_ulong ptr;
1240 qemu_log(" code=");
1241 ptr = env->segs[R_CS].base + env->eip;
1242 for(i = 0; i < 16; i++) {
1243 qemu_log(" %02x", ldub(ptr + i));
1244 }
1245 qemu_log("\n");
1246 }
1247 #endif
1248 count++;
1249 }
1250 }
1251 if (env->cr[0] & CR0_PE_MASK) {
1252 #if !defined(CONFIG_USER_ONLY)
1253 if (env->hflags & HF_SVMI_MASK)
1254 handle_even_inj(intno, is_int, error_code, is_hw, 0);
1255 #endif
1256 #ifdef TARGET_X86_64
1257 if (env->hflags & HF_LMA_MASK) {
1258 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1259 } else
1260 #endif
1261 {
1262 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1263 }
1264 } else {
1265 #if !defined(CONFIG_USER_ONLY)
1266 if (env->hflags & HF_SVMI_MASK)
1267 handle_even_inj(intno, is_int, error_code, is_hw, 1);
1268 #endif
1269 do_interrupt_real(intno, is_int, error_code, next_eip);
1270 }
1271
1272 #if !defined(CONFIG_USER_ONLY)
1273 if (env->hflags & HF_SVMI_MASK) {
1274 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1275 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1276 }
1277 #endif
1278 }
1279
1280 /* This should come from sysemu.h - if we could include it here... */
1281 void qemu_system_reset_request(void);
1282
1283 /*
1284 * Check nested exceptions and change to double or triple fault if
1285 * needed. It should only be called if this is not an interrupt.
1286 * Returns the new exception number.
1287 */
1288 static int check_exception(int intno, int *error_code)
1289 {
1290 int first_contributory = env->old_exception == 0 ||
1291 (env->old_exception >= 10 &&
1292 env->old_exception <= 13);
1293 int second_contributory = intno == 0 ||
1294 (intno >= 10 && intno <= 13);
1295
1296 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
1297 env->old_exception, intno);
1298
1299 #if !defined(CONFIG_USER_ONLY)
1300 if (env->old_exception == EXCP08_DBLE) {
1301 if (env->hflags & HF_SVMI_MASK)
1302 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1303
1304 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
1305
1306 qemu_system_reset_request();
1307 return EXCP_HLT;
1308 }
1309 #endif
1310
1311 if ((first_contributory && second_contributory)
1312 || (env->old_exception == EXCP0E_PAGE &&
1313 (second_contributory || (intno == EXCP0E_PAGE)))) {
1314 intno = EXCP08_DBLE;
1315 *error_code = 0;
1316 }
1317
1318 if (second_contributory || (intno == EXCP0E_PAGE) ||
1319 (intno == EXCP08_DBLE))
1320 env->old_exception = intno;
1321
1322 return intno;
1323 }
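/* Illustrative sketch (not compiled, hypothetical caller): the contributory
   classes above mean, for instance, that a #GP (13) raised while a previous
   #GP is still being delivered is promoted to #DF (8) with error code 0, and
   that any further fault while env->old_exception == EXCP08_DBLE escalates
   to a triple fault (SHUTDOWN under SVM, otherwise a system reset). */
#if 0
static void example_double_fault_promotion(void)
{
    int error_code = 0x10;
    int intno = check_exception(EXCP0D_GPF, &error_code);

    /* if a contributory exception was already pending, intno is now
       EXCP08_DBLE and error_code has been cleared to 0 */
    (void)intno;
}
#endif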
1324
1325 /*
1326 * Signal an interrupt. It is executed in the main CPU loop.
1327 * is_int is TRUE if coming from the int instruction. next_eip is the
1328 * EIP value AFTER the interrupt instruction. It is only relevant if
1329 * is_int is TRUE.
1330 */
1331 static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1332 int next_eip_addend)
1333 {
1334 if (!is_int) {
1335 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1336 intno = check_exception(intno, &error_code);
1337 } else {
1338 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
1339 }
1340
1341 env->exception_index = intno;
1342 env->error_code = error_code;
1343 env->exception_is_int = is_int;
1344 env->exception_next_eip = env->eip + next_eip_addend;
1345 cpu_loop_exit();
1346 }
1347
1348 /* shortcuts to generate exceptions */
1349
1350 void raise_exception_err(int exception_index, int error_code)
1351 {
1352 raise_interrupt(exception_index, 0, error_code, 0);
1353 }
1354
1355 void raise_exception(int exception_index)
1356 {
1357 raise_interrupt(exception_index, 0, 0, 0);
1358 }
1359
1360 void raise_exception_env(int exception_index, CPUState *nenv)
1361 {
1362 env = nenv;
1363 raise_exception(exception_index);
1364 }
1365 /* SMM support */
1366
1367 #if defined(CONFIG_USER_ONLY)
1368
1369 void do_smm_enter(void)
1370 {
1371 }
1372
1373 void helper_rsm(void)
1374 {
1375 }
1376
1377 #else
1378
1379 #ifdef TARGET_X86_64
1380 #define SMM_REVISION_ID 0x00020064
1381 #else
1382 #define SMM_REVISION_ID 0x00020000
1383 #endif
1384
1385 void do_smm_enter(void)
1386 {
1387 target_ulong sm_state;
1388 SegmentCache *dt;
1389 int i, offset;
1390
1391 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1392 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1393
1394 env->hflags |= HF_SMM_MASK;
1395 cpu_smm_update(env);
1396
1397 sm_state = env->smbase + 0x8000;
1398
1399 #ifdef TARGET_X86_64
1400 for(i = 0; i < 6; i++) {
1401 dt = &env->segs[i];
1402 offset = 0x7e00 + i * 16;
1403 stw_phys(sm_state + offset, dt->selector);
1404 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1405 stl_phys(sm_state + offset + 4, dt->limit);
1406 stq_phys(sm_state + offset + 8, dt->base);
1407 }
1408
1409 stq_phys(sm_state + 0x7e68, env->gdt.base);
1410 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1411
1412 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1413 stq_phys(sm_state + 0x7e78, env->ldt.base);
1414 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1415 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1416
1417 stq_phys(sm_state + 0x7e88, env->idt.base);
1418 stl_phys(sm_state + 0x7e84, env->idt.limit);
1419
1420 stw_phys(sm_state + 0x7e90, env->tr.selector);
1421 stq_phys(sm_state + 0x7e98, env->tr.base);
1422 stl_phys(sm_state + 0x7e94, env->tr.limit);
1423 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1424
1425 stq_phys(sm_state + 0x7ed0, env->efer);
1426
1427 stq_phys(sm_state + 0x7ff8, EAX);
1428 stq_phys(sm_state + 0x7ff0, ECX);
1429 stq_phys(sm_state + 0x7fe8, EDX);
1430 stq_phys(sm_state + 0x7fe0, EBX);
1431 stq_phys(sm_state + 0x7fd8, ESP);
1432 stq_phys(sm_state + 0x7fd0, EBP);
1433 stq_phys(sm_state + 0x7fc8, ESI);
1434 stq_phys(sm_state + 0x7fc0, EDI);
1435 for(i = 8; i < 16; i++)
1436 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1437 stq_phys(sm_state + 0x7f78, env->eip);
1438 stl_phys(sm_state + 0x7f70, compute_eflags());
1439 stl_phys(sm_state + 0x7f68, env->dr[6]);
1440 stl_phys(sm_state + 0x7f60, env->dr[7]);
1441
1442 stl_phys(sm_state + 0x7f48, env->cr[4]);
1443 stl_phys(sm_state + 0x7f50, env->cr[3]);
1444 stl_phys(sm_state + 0x7f58, env->cr[0]);
1445
1446 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1447 stl_phys(sm_state + 0x7f00, env->smbase);
1448 #else
1449 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1450 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1451 stl_phys(sm_state + 0x7ff4, compute_eflags());
1452 stl_phys(sm_state + 0x7ff0, env->eip);
1453 stl_phys(sm_state + 0x7fec, EDI);
1454 stl_phys(sm_state + 0x7fe8, ESI);
1455 stl_phys(sm_state + 0x7fe4, EBP);
1456 stl_phys(sm_state + 0x7fe0, ESP);
1457 stl_phys(sm_state + 0x7fdc, EBX);
1458 stl_phys(sm_state + 0x7fd8, EDX);
1459 stl_phys(sm_state + 0x7fd4, ECX);
1460 stl_phys(sm_state + 0x7fd0, EAX);
1461 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1462 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1463
1464 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1465 stl_phys(sm_state + 0x7f64, env->tr.base);
1466 stl_phys(sm_state + 0x7f60, env->tr.limit);
1467 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1468
1469 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1470 stl_phys(sm_state + 0x7f80, env->ldt.base);
1471 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1472 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1473
1474 stl_phys(sm_state + 0x7f74, env->gdt.base);
1475 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1476
1477 stl_phys(sm_state + 0x7f58, env->idt.base);
1478 stl_phys(sm_state + 0x7f54, env->idt.limit);
1479
1480 for(i = 0; i < 6; i++) {
1481 dt = &env->segs[i];
1482 if (i < 3)
1483 offset = 0x7f84 + i * 12;
1484 else
1485 offset = 0x7f2c + (i - 3) * 12;
1486 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1487 stl_phys(sm_state + offset + 8, dt->base);
1488 stl_phys(sm_state + offset + 4, dt->limit);
1489 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1490 }
1491 stl_phys(sm_state + 0x7f14, env->cr[4]);
1492
1493 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1494 stl_phys(sm_state + 0x7ef8, env->smbase);
1495 #endif
1496 /* init SMM cpu state */
1497
1498 #ifdef TARGET_X86_64
1499 cpu_load_efer(env, 0);
1500 #endif
1501 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1502 env->eip = 0x00008000;
1503 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1504 0xffffffff, 0);
1505 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1506 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1507 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1508 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1509 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1510
1511 cpu_x86_update_cr0(env,
1512 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1513 cpu_x86_update_cr4(env, 0);
1514 env->dr[7] = 0x00000400;
1515 CC_OP = CC_OP_EFLAGS;
1516 }
1517
1518 void helper_rsm(void)
1519 {
1520 target_ulong sm_state;
1521 int i, offset;
1522 uint32_t val;
1523
1524 sm_state = env->smbase + 0x8000;
1525 #ifdef TARGET_X86_64
1526 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
1527
1528 for(i = 0; i < 6; i++) {
1529 offset = 0x7e00 + i * 16;
1530 cpu_x86_load_seg_cache(env, i,
1531 lduw_phys(sm_state + offset),
1532 ldq_phys(sm_state + offset + 8),
1533 ldl_phys(sm_state + offset + 4),
1534 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1535 }
1536
1537 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1538 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1539
1540 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1541 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1542 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1543 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1544
1545 env->idt.base = ldq_phys(sm_state + 0x7e88);
1546 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1547
1548 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1549 env->tr.base = ldq_phys(sm_state + 0x7e98);
1550 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1551 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1552
1553 EAX = ldq_phys(sm_state + 0x7ff8);
1554 ECX = ldq_phys(sm_state + 0x7ff0);
1555 EDX = ldq_phys(sm_state + 0x7fe8);
1556 EBX = ldq_phys(sm_state + 0x7fe0);
1557 ESP = ldq_phys(sm_state + 0x7fd8);
1558 EBP = ldq_phys(sm_state + 0x7fd0);
1559 ESI = ldq_phys(sm_state + 0x7fc8);
1560 EDI = ldq_phys(sm_state + 0x7fc0);
1561 for(i = 8; i < 16; i++)
1562 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1563 env->eip = ldq_phys(sm_state + 0x7f78);
1564 load_eflags(ldl_phys(sm_state + 0x7f70),
1565 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1566 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1567 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1568
1569 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1570 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1571 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1572
1573 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1574 if (val & 0x20000) {
1575 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1576 }
1577 #else
1578 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1579 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1580 load_eflags(ldl_phys(sm_state + 0x7ff4),
1581 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1582 env->eip = ldl_phys(sm_state + 0x7ff0);
1583 EDI = ldl_phys(sm_state + 0x7fec);
1584 ESI = ldl_phys(sm_state + 0x7fe8);
1585 EBP = ldl_phys(sm_state + 0x7fe4);
1586 ESP = ldl_phys(sm_state + 0x7fe0);
1587 EBX = ldl_phys(sm_state + 0x7fdc);
1588 EDX = ldl_phys(sm_state + 0x7fd8);
1589 ECX = ldl_phys(sm_state + 0x7fd4);
1590 EAX = ldl_phys(sm_state + 0x7fd0);
1591 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1592 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1593
1594 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1595 env->tr.base = ldl_phys(sm_state + 0x7f64);
1596 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1597 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1598
1599 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1600 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1601 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1602 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1603
1604 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1605 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1606
1607 env->idt.base = ldl_phys(sm_state + 0x7f58);
1608 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1609
1610 for(i = 0; i < 6; i++) {
1611 if (i < 3)
1612 offset = 0x7f84 + i * 12;
1613 else
1614 offset = 0x7f2c + (i - 3) * 12;
1615 cpu_x86_load_seg_cache(env, i,
1616 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1617 ldl_phys(sm_state + offset + 8),
1618 ldl_phys(sm_state + offset + 4),
1619 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1620 }
1621 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1622
1623 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1624 if (val & 0x20000) {
1625 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1626 }
1627 #endif
1628 CC_OP = CC_OP_EFLAGS;
1629 env->hflags &= ~HF_SMM_MASK;
1630 cpu_smm_update(env);
1631
1632 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1633 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1634 }
1635
1636 #endif /* !CONFIG_USER_ONLY */
1637
1638
1639 /* division, flags are undefined */
1640
1641 void helper_divb_AL(target_ulong t0)
1642 {
1643 unsigned int num, den, q, r;
1644
1645 num = (EAX & 0xffff);
1646 den = (t0 & 0xff);
1647 if (den == 0) {
1648 raise_exception(EXCP00_DIVZ);
1649 }
1650 q = (num / den);
1651 if (q > 0xff)
1652 raise_exception(EXCP00_DIVZ);
1653 q &= 0xff;
1654 r = (num % den) & 0xff;
1655 EAX = (EAX & ~0xffff) | (r << 8) | q;
1656 }
1657
1658 void helper_idivb_AL(target_ulong t0)
1659 {
1660 int num, den, q, r;
1661
1662 num = (int16_t)EAX;
1663 den = (int8_t)t0;
1664 if (den == 0) {
1665 raise_exception(EXCP00_DIVZ);
1666 }
1667 q = (num / den);
1668 if (q != (int8_t)q)
1669 raise_exception(EXCP00_DIVZ);
1670 q &= 0xff;
1671 r = (num % den) & 0xff;
1672 EAX = (EAX & ~0xffff) | (r << 8) | q;
1673 }
1674
1675 void helper_divw_AX(target_ulong t0)
1676 {
1677 unsigned int num, den, q, r;
1678
1679 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1680 den = (t0 & 0xffff);
1681 if (den == 0) {
1682 raise_exception(EXCP00_DIVZ);
1683 }
1684 q = (num / den);
1685 if (q > 0xffff)
1686 raise_exception(EXCP00_DIVZ);
1687 q &= 0xffff;
1688 r = (num % den) & 0xffff;
1689 EAX = (EAX & ~0xffff) | q;
1690 EDX = (EDX & ~0xffff) | r;
1691 }
1692
1693 void helper_idivw_AX(target_ulong t0)
1694 {
1695 int num, den, q, r;
1696
1697 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1698 den = (int16_t)t0;
1699 if (den == 0) {
1700 raise_exception(EXCP00_DIVZ);
1701 }
1702 q = (num / den);
1703 if (q != (int16_t)q)
1704 raise_exception(EXCP00_DIVZ);
1705 q &= 0xffff;
1706 r = (num % den) & 0xffff;
1707 EAX = (EAX & ~0xffff) | q;
1708 EDX = (EDX & ~0xffff) | r;
1709 }
1710
1711 void helper_divl_EAX(target_ulong t0)
1712 {
1713 unsigned int den, r;
1714 uint64_t num, q;
1715
1716 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1717 den = t0;
1718 if (den == 0) {
1719 raise_exception(EXCP00_DIVZ);
1720 }
1721 q = (num / den);
1722 r = (num % den);
1723 if (q > 0xffffffff)
1724 raise_exception(EXCP00_DIVZ);
1725 EAX = (uint32_t)q;
1726 EDX = (uint32_t)r;
1727 }
1728
1729 void helper_idivl_EAX(target_ulong t0)
1730 {
1731 int den, r;
1732 int64_t num, q;
1733
1734 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1735 den = t0;
1736 if (den == 0) {
1737 raise_exception(EXCP00_DIVZ);
1738 }
1739 q = (num / den);
1740 r = (num % den);
1741 if (q != (int32_t)q)
1742 raise_exception(EXCP00_DIVZ);
1743 EAX = (uint32_t)q;
1744 EDX = (uint32_t)r;
1745 }
1746
1747 /* bcd */
1748
1749 /* XXX: exception */
1750 void helper_aam(int base)
1751 {
1752 int al, ah;
1753 al = EAX & 0xff;
1754 ah = al / base;
1755 al = al % base;
1756 EAX = (EAX & ~0xffff) | al | (ah << 8);
1757 CC_DST = al;
1758 }
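/* Worked example (sketch only, not compiled): helper_aam(10) with AL == 53
   (0x35) stores AH = 53 / 10 = 5 and AL = 53 % 10 = 3, so AX becomes 0x0503,
   and CC_DST is set to the new AL so ZF/SF/PF are derived from it.  The XXX
   above refers to the fact that a base of 0 should raise #DE, which this
   helper does not yet do. */
#if 0
static void example_aam(void)
{
    EAX = (EAX & ~0xffff) | 0x0035;   /* AL = 53 */
    helper_aam(10);                   /* AX == 0x0503 afterwards */
}
#endif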
1759
1760 void helper_aad(int base)
1761 {
1762 int al, ah;
1763 al = EAX & 0xff;
1764 ah = (EAX >> 8) & 0xff;
1765 al = ((ah * base) + al) & 0xff;
1766 EAX = (EAX & ~0xffff) | al;
1767 CC_DST = al;
1768 }
1769
1770 void helper_aaa(void)
1771 {
1772 int icarry;
1773 int al, ah, af;
1774 int eflags;
1775
1776 eflags = helper_cc_compute_all(CC_OP);
1777 af = eflags & CC_A;
1778 al = EAX & 0xff;
1779 ah = (EAX >> 8) & 0xff;
1780
1781 icarry = (al > 0xf9);
1782 if (((al & 0x0f) > 9 ) || af) {
1783 al = (al + 6) & 0x0f;
1784 ah = (ah + 1 + icarry) & 0xff;
1785 eflags |= CC_C | CC_A;
1786 } else {
1787 eflags &= ~(CC_C | CC_A);
1788 al &= 0x0f;
1789 }
1790 EAX = (EAX & ~0xffff) | al | (ah << 8);
1791 CC_SRC = eflags;
1792 }
1793
1794 void helper_aas(void)
1795 {
1796 int icarry;
1797 int al, ah, af;
1798 int eflags;
1799
1800 eflags = helper_cc_compute_all(CC_OP);
1801 af = eflags & CC_A;
1802 al = EAX & 0xff;
1803 ah = (EAX >> 8) & 0xff;
1804
1805 icarry = (al < 6);
1806 if (((al & 0x0f) > 9 ) || af) {
1807 al = (al - 6) & 0x0f;
1808 ah = (ah - 1 - icarry) & 0xff;
1809 eflags |= CC_C | CC_A;
1810 } else {
1811 eflags &= ~(CC_C | CC_A);
1812 al &= 0x0f;
1813 }
1814 EAX = (EAX & ~0xffff) | al | (ah << 8);
1815 CC_SRC = eflags;
1816 }
1817
1818 void helper_daa(void)
1819 {
1820 int al, af, cf;
1821 int eflags;
1822
1823 eflags = helper_cc_compute_all(CC_OP);
1824 cf = eflags & CC_C;
1825 af = eflags & CC_A;
1826 al = EAX & 0xff;
1827
1828 eflags = 0;
1829 if (((al & 0x0f) > 9 ) || af) {
1830 al = (al + 6) & 0xff;
1831 eflags |= CC_A;
1832 }
1833 if ((al > 0x9f) || cf) {
1834 al = (al + 0x60) & 0xff;
1835 eflags |= CC_C;
1836 }
1837 EAX = (EAX & ~0xff) | al;
1838 /* well, speed is not an issue here, so we compute the flags by hand */
1839 eflags |= (al == 0) << 6; /* zf */
1840 eflags |= parity_table[al]; /* pf */
1841 eflags |= (al & 0x80); /* sf */
1842 CC_SRC = eflags;
1843 }
1844
1845 void helper_das(void)
1846 {
1847 int al, al1, af, cf;
1848 int eflags;
1849
1850 eflags = helper_cc_compute_all(CC_OP);
1851 cf = eflags & CC_C;
1852 af = eflags & CC_A;
1853 al = EAX & 0xff;
1854
1855 eflags = 0;
1856 al1 = al;
1857 if (((al & 0x0f) > 9 ) || af) {
1858 eflags |= CC_A;
1859 if (al < 6 || cf)
1860 eflags |= CC_C;
1861 al = (al - 6) & 0xff;
1862 }
1863 if ((al1 > 0x99) || cf) {
1864 al = (al - 0x60) & 0xff;
1865 eflags |= CC_C;
1866 }
1867 EAX = (EAX & ~0xff) | al;
1868 /* well, speed is not an issue here, so we compute the flags by hand */
1869 eflags |= (al == 0) << 6; /* zf */
1870 eflags |= parity_table[al]; /* pf */
1871 eflags |= (al & 0x80); /* sf */
1872 CC_SRC = eflags;
1873 }
1874
1875 void helper_into(int next_eip_addend)
1876 {
1877 int eflags;
1878 eflags = helper_cc_compute_all(CC_OP);
1879 if (eflags & CC_O) {
1880 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1881 }
1882 }
1883
1884 void helper_cmpxchg8b(target_ulong a0)
1885 {
1886 uint64_t d;
1887 int eflags;
1888
1889 eflags = helper_cc_compute_all(CC_OP);
1890 d = ldq(a0);
1891 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1892 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1893 eflags |= CC_Z;
1894 } else {
1895 /* always do the store */
1896 stq(a0, d);
1897 EDX = (uint32_t)(d >> 32);
1898 EAX = (uint32_t)d;
1899 eflags &= ~CC_Z;
1900 }
1901 CC_SRC = eflags;
1902 }
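/* Illustrative sketch (not compiled, hypothetical caller): CMPXCHG8B compares
   EDX:EAX with the 64-bit value at a0.  On a match it stores ECX:EBX there
   and sets ZF; otherwise it loads the memory value into EDX:EAX and clears
   ZF.  The helper rewrites the old value in the failure case so that a store
   always happens, as noted in the comment above. */
#if 0
static void example_cmpxchg8b(target_ulong a0)
{
    EAX = 0x11111111; EDX = 0x22222222;   /* expected old value */
    EBX = 0x33333333; ECX = 0x44444444;   /* replacement if it matches */
    helper_cmpxchg8b(a0);
    /* CC_SRC now has CC_Z set iff the memory held 0x2222222211111111 */
}
#endif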
1903
1904 #ifdef TARGET_X86_64
1905 void helper_cmpxchg16b(target_ulong a0)
1906 {
1907 uint64_t d0, d1;
1908 int eflags;
1909
1910 if ((a0 & 0xf) != 0)
1911 raise_exception(EXCP0D_GPF);
1912 eflags = helper_cc_compute_all(CC_OP);
1913 d0 = ldq(a0);
1914 d1 = ldq(a0 + 8);
1915 if (d0 == EAX && d1 == EDX) {
1916 stq(a0, EBX);
1917 stq(a0 + 8, ECX);
1918 eflags |= CC_Z;
1919 } else {
1920 /* always do the store */
1921 stq(a0, d0);
1922 stq(a0 + 8, d1);
1923 EDX = d1;
1924 EAX = d0;
1925 eflags &= ~CC_Z;
1926 }
1927 CC_SRC = eflags;
1928 }
1929 #endif
1930
1931 void helper_single_step(void)
1932 {
1933 #ifndef CONFIG_USER_ONLY
1934 check_hw_breakpoints(env, 1);
1935 env->dr[6] |= DR6_BS;
1936 #endif
1937 raise_exception(EXCP01_DB);
1938 }
1939
1940 void helper_cpuid(void)
1941 {
1942 uint32_t eax, ebx, ecx, edx;
1943
1944 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1945
1946 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1947 EAX = eax;
1948 EBX = ebx;
1949 ECX = ecx;
1950 EDX = edx;
1951 }
1952
1953 void helper_enter_level(int level, int data32, target_ulong t1)
1954 {
1955 target_ulong ssp;
1956 uint32_t esp_mask, esp, ebp;
1957
1958 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1959 ssp = env->segs[R_SS].base;
1960 ebp = EBP;
1961 esp = ESP;
1962 if (data32) {
1963 /* 32 bit */
1964 esp -= 4;
1965 while (--level) {
1966 esp -= 4;
1967 ebp -= 4;
1968 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1969 }
1970 esp -= 4;
1971 stl(ssp + (esp & esp_mask), t1);
1972 } else {
1973 /* 16 bit */
1974 esp -= 2;
1975 while (--level) {
1976 esp -= 2;
1977 ebp -= 2;
1978 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1979 }
1980 esp -= 2;
1981 stw(ssp + (esp & esp_mask), t1);
1982 }
1983 }
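/* Editor's note (illustrative): this implements the nesting-level part of
 * ENTER.  For a nesting level n > 1 the n-1 enclosing frame pointers are
 * copied from the old frame (addressed through EBP) onto the new stack, and
 * t1 -- the value that becomes the new frame pointer -- is pushed last,
 * matching the display-building step of the IA-32 ENTER description.  The
 * initial stack-pointer decrement skips the slot of the old (E)BP, which is
 * assumed here to be pushed by the translated code rather than by this
 * helper (a reading of the calling convention, not stated in this file). */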
1984
1985 #ifdef TARGET_X86_64
1986 void helper_enter64_level(int level, int data64, target_ulong t1)
1987 {
1988 target_ulong esp, ebp;
1989 ebp = EBP;
1990 esp = ESP;
1991
1992 if (data64) {
1993 /* 64 bit */
1994 esp -= 8;
1995 while (--level) {
1996 esp -= 8;
1997 ebp -= 8;
1998 stq(esp, ldq(ebp));
1999 }
2000 esp -= 8;
2001 stq(esp, t1);
2002 } else {
2003 /* 16 bit */
2004 esp -= 2;
2005 while (--level) {
2006 esp -= 2;
2007 ebp -= 2;
2008 stw(esp, lduw(ebp));
2009 }
2010 esp -= 2;
2011 stw(esp, t1);
2012 }
2013 }
2014 #endif
2015
2016 void helper_lldt(int selector)
2017 {
2018 SegmentCache *dt;
2019 uint32_t e1, e2;
2020 int index, entry_limit;
2021 target_ulong ptr;
2022
2023 selector &= 0xffff;
2024 if ((selector & 0xfffc) == 0) {
2025 /* XXX: NULL selector case: invalid LDT */
2026 env->ldt.base = 0;
2027 env->ldt.limit = 0;
2028 } else {
2029 if (selector & 0x4)
2030 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2031 dt = &env->gdt;
2032 index = selector & ~7;
2033 #ifdef TARGET_X86_64
2034 if (env->hflags & HF_LMA_MASK)
2035 entry_limit = 15;
2036 else
2037 #endif
2038 entry_limit = 7;
2039 if ((index + entry_limit) > dt->limit)
2040 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2041 ptr = dt->base + index;
2042 e1 = ldl_kernel(ptr);
2043 e2 = ldl_kernel(ptr + 4);
2044 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2045 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2046 if (!(e2 & DESC_P_MASK))
2047 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2048 #ifdef TARGET_X86_64
2049 if (env->hflags & HF_LMA_MASK) {
2050 uint32_t e3;
2051 e3 = ldl_kernel(ptr + 8);
2052 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2053 env->ldt.base |= (target_ulong)e3 << 32;
2054 } else
2055 #endif
2056 {
2057 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2058 }
2059 }
2060 env->ldt.selector = selector;
2061 }
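/* Editor's note (illustrative): e1/e2 are the two little-endian dwords of a
 * legacy 8-byte descriptor.  Roughly:
 *
 *     base  = (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
 *     limit = (e1 & 0xffff) | (e2 & 0x000f0000);   // <<12 | 0xfff if G set
 *
 * which is what get_seg_base()/get_seg_limit() compute elsewhere.  In long
 * mode, system descriptors such as the LDT and TSS grow to 16 bytes, with
 * bits 63:32 of the base in the third dword (e3) -- hence an entry_limit of
 * 15 instead of 7 and the extra ldl_kernel(ptr + 8) above. */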
2062
2063 void helper_ltr(int selector)
2064 {
2065 SegmentCache *dt;
2066 uint32_t e1, e2;
2067 int index, type, entry_limit;
2068 target_ulong ptr;
2069
2070 selector &= 0xffff;
2071 if ((selector & 0xfffc) == 0) {
2072 /* NULL selector case: invalid TR */
2073 env->tr.base = 0;
2074 env->tr.limit = 0;
2075 env->tr.flags = 0;
2076 } else {
2077 if (selector & 0x4)
2078 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2079 dt = &env->gdt;
2080 index = selector & ~7;
2081 #ifdef TARGET_X86_64
2082 if (env->hflags & HF_LMA_MASK)
2083 entry_limit = 15;
2084 else
2085 #endif
2086 entry_limit = 7;
2087 if ((index + entry_limit) > dt->limit)
2088 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2089 ptr = dt->base + index;
2090 e1 = ldl_kernel(ptr);
2091 e2 = ldl_kernel(ptr + 4);
2092 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2093 if ((e2 & DESC_S_MASK) ||
2094 (type != 1 && type != 9))
2095 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2096 if (!(e2 & DESC_P_MASK))
2097 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2098 #ifdef TARGET_X86_64
2099 if (env->hflags & HF_LMA_MASK) {
2100 uint32_t e3, e4;
2101 e3 = ldl_kernel(ptr + 8);
2102 e4 = ldl_kernel(ptr + 12);
2103 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2104 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2105 load_seg_cache_raw_dt(&env->tr, e1, e2);
2106 env->tr.base |= (target_ulong)e3 << 32;
2107 } else
2108 #endif
2109 {
2110 load_seg_cache_raw_dt(&env->tr, e1, e2);
2111 }
2112 e2 |= DESC_TSS_BUSY_MASK;
2113 stl_kernel(ptr + 4, e2);
2114 }
2115 env->tr.selector = selector;
2116 }
2117
2118 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
2119 void helper_load_seg(int seg_reg, int selector)
2120 {
2121 uint32_t e1, e2;
2122 int cpl, dpl, rpl;
2123 SegmentCache *dt;
2124 int index;
2125 target_ulong ptr;
2126
2127 selector &= 0xffff;
2128 cpl = env->hflags & HF_CPL_MASK;
2129 if ((selector & 0xfffc) == 0) {
2130 /* null selector case */
2131 if (seg_reg == R_SS
2132 #ifdef TARGET_X86_64
2133 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2134 #endif
2135 )
2136 raise_exception_err(EXCP0D_GPF, 0);
2137 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2138 } else {
2139
2140 if (selector & 0x4)
2141 dt = &env->ldt;
2142 else
2143 dt = &env->gdt;
2144 index = selector & ~7;
2145 if ((index + 7) > dt->limit)
2146 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2147 ptr = dt->base + index;
2148 e1 = ldl_kernel(ptr);
2149 e2 = ldl_kernel(ptr + 4);
2150
2151 if (!(e2 & DESC_S_MASK))
2152 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2153 rpl = selector & 3;
2154 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2155 if (seg_reg == R_SS) {
2156 /* must be writable segment */
2157 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2158 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2159 if (rpl != cpl || dpl != cpl)
2160 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2161 } else {
2162 /* must be readable segment */
2163 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2164 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2165
2166 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2167 /* if not conforming code, test rights */
2168 if (dpl < cpl || dpl < rpl)
2169 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2170 }
2171 }
2172
2173 if (!(e2 & DESC_P_MASK)) {
2174 if (seg_reg == R_SS)
2175 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2176 else
2177 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2178 }
2179
2180 /* set the access bit if not already set */
2181 if (!(e2 & DESC_A_MASK)) {
2182 e2 |= DESC_A_MASK;
2183 stl_kernel(ptr + 4, e2);
2184 }
2185
2186 cpu_x86_load_seg_cache(env, seg_reg, selector,
2187 get_seg_base(e1, e2),
2188 get_seg_limit(e1, e2),
2189 e2);
2190 #if 0
2191 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2192 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2193 #endif
2194 }
2195 }
2196
2197 /* protected mode jump */
2198 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2199 int next_eip_addend)
2200 {
2201 int gate_cs, type;
2202 uint32_t e1, e2, cpl, dpl, rpl, limit;
2203 target_ulong next_eip;
2204
2205 if ((new_cs & 0xfffc) == 0)
2206 raise_exception_err(EXCP0D_GPF, 0);
2207 if (load_segment(&e1, &e2, new_cs) != 0)
2208 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2209 cpl = env->hflags & HF_CPL_MASK;
2210 if (e2 & DESC_S_MASK) {
2211 if (!(e2 & DESC_CS_MASK))
2212 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2213 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2214 if (e2 & DESC_C_MASK) {
2215 /* conforming code segment */
2216 if (dpl > cpl)
2217 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2218 } else {
2219 /* non conforming code segment */
2220 rpl = new_cs & 3;
2221 if (rpl > cpl)
2222 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2223 if (dpl != cpl)
2224 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2225 }
2226 if (!(e2 & DESC_P_MASK))
2227 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2228 limit = get_seg_limit(e1, e2);
2229 if (new_eip > limit &&
2230 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2231 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2232 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2233 get_seg_base(e1, e2), limit, e2);
2234 EIP = new_eip;
2235 } else {
2236 /* jump to call or task gate */
2237 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2238 rpl = new_cs & 3;
2239 cpl = env->hflags & HF_CPL_MASK;
2240 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2241 switch(type) {
2242 case 1: /* 286 TSS */
2243 case 9: /* 386 TSS */
2244 case 5: /* task gate */
2245 if (dpl < cpl || dpl < rpl)
2246 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2247 next_eip = env->eip + next_eip_addend;
2248 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2249 CC_OP = CC_OP_EFLAGS;
2250 break;
2251 case 4: /* 286 call gate */
2252 case 12: /* 386 call gate */
2253 if ((dpl < cpl) || (dpl < rpl))
2254 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2255 if (!(e2 & DESC_P_MASK))
2256 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2257 gate_cs = e1 >> 16;
2258 new_eip = (e1 & 0xffff);
2259 if (type == 12)
2260 new_eip |= (e2 & 0xffff0000);
2261 if (load_segment(&e1, &e2, gate_cs) != 0)
2262 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2263 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2264 /* must be code segment */
2265 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2266 (DESC_S_MASK | DESC_CS_MASK)))
2267 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2268 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2269 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2270 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2271 if (!(e2 & DESC_P_MASK))
2272 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2273 limit = get_seg_limit(e1, e2);
2274 if (new_eip > limit)
2275 raise_exception_err(EXCP0D_GPF, 0);
2276 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2277 get_seg_base(e1, e2), limit, e2);
2278 EIP = new_eip;
2279 break;
2280 default:
2281 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2282 break;
2283 }
2284 }
2285 }
2286
2287 /* real mode call */
2288 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2289 int shift, int next_eip)
2290 {
2291 int new_eip;
2292 uint32_t esp, esp_mask;
2293 target_ulong ssp;
2294
2295 new_eip = new_eip1;
2296 esp = ESP;
2297 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2298 ssp = env->segs[R_SS].base;
2299 if (shift) {
2300 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2301 PUSHL(ssp, esp, esp_mask, next_eip);
2302 } else {
2303 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2304 PUSHW(ssp, esp, esp_mask, next_eip);
2305 }
2306
2307 SET_ESP(esp, esp_mask);
2308 env->eip = new_eip;
2309 env->segs[R_CS].selector = new_cs;
2310 env->segs[R_CS].base = (new_cs << 4);
2311 }
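/* Editor's note (illustrative): in real mode a far call just pushes the
 * return address and reloads CS as a paragraph number.  For example,
 * "lcall 0x1234:0x5678" pushes the old CS and the next EIP (32-bit pushes
 * when shift != 0, 16-bit otherwise), then sets CS.selector = 0x1234,
 * CS.base = 0x12340 and EIP = 0x5678. */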
2312
2313 /* protected mode call */
2314 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2315 int shift, int next_eip_addend)
2316 {
2317 int new_stack, i;
2318 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2319 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2320 uint32_t val, limit, old_sp_mask;
2321 target_ulong ssp, old_ssp, next_eip;
2322
2323 next_eip = env->eip + next_eip_addend;
2324 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2325 LOG_PCALL_STATE(env);
2326 if ((new_cs & 0xfffc) == 0)
2327 raise_exception_err(EXCP0D_GPF, 0);
2328 if (load_segment(&e1, &e2, new_cs) != 0)
2329 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2330 cpl = env->hflags & HF_CPL_MASK;
2331 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2332 if (e2 & DESC_S_MASK) {
2333 if (!(e2 & DESC_CS_MASK))
2334 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2335 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2336 if (e2 & DESC_C_MASK) {
2337 /* conforming code segment */
2338 if (dpl > cpl)
2339 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2340 } else {
2341 /* non conforming code segment */
2342 rpl = new_cs & 3;
2343 if (rpl > cpl)
2344 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2345 if (dpl != cpl)
2346 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2347 }
2348 if (!(e2 & DESC_P_MASK))
2349 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2350
2351 #ifdef TARGET_X86_64
2352 /* XXX: check 16/32 bit cases in long mode */
2353 if (shift == 2) {
2354 target_ulong rsp;
2355 /* 64 bit case */
2356 rsp = ESP;
2357 PUSHQ(rsp, env->segs[R_CS].selector);
2358 PUSHQ(rsp, next_eip);
2359 /* from this point, not restartable */
2360 ESP = rsp;
2361 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2362 get_seg_base(e1, e2),
2363 get_seg_limit(e1, e2), e2);
2364 EIP = new_eip;
2365 } else
2366 #endif
2367 {
2368 sp = ESP;
2369 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2370 ssp = env->segs[R_SS].base;
2371 if (shift) {
2372 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2373 PUSHL(ssp, sp, sp_mask, next_eip);
2374 } else {
2375 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2376 PUSHW(ssp, sp, sp_mask, next_eip);
2377 }
2378
2379 limit = get_seg_limit(e1, e2);
2380 if (new_eip > limit)
2381 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2382 /* from this point, not restartable */
2383 SET_ESP(sp, sp_mask);
2384 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2385 get_seg_base(e1, e2), limit, e2);
2386 EIP = new_eip;
2387 }
2388 } else {
2389 /* check gate type */
2390 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2391 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2392 rpl = new_cs & 3;
2393 switch(type) {
2394 case 1: /* available 286 TSS */
2395 case 9: /* available 386 TSS */
2396 case 5: /* task gate */
2397 if (dpl < cpl || dpl < rpl)
2398 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2399 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2400 CC_OP = CC_OP_EFLAGS;
2401 return;
2402 case 4: /* 286 call gate */
2403 case 12: /* 386 call gate */
2404 break;
2405 default:
2406 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2407 break;
2408 }
2409 shift = type >> 3;
2410
2411 if (dpl < cpl || dpl < rpl)
2412 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2413 /* check valid bit */
2414 if (!(e2 & DESC_P_MASK))
2415 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2416 selector = e1 >> 16;
2417 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2418 param_count = e2 & 0x1f;
2419 if ((selector & 0xfffc) == 0)
2420 raise_exception_err(EXCP0D_GPF, 0);
2421
2422 if (load_segment(&e1, &e2, selector) != 0)
2423 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2424 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2425 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2426 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2427 if (dpl > cpl)
2428 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2429 if (!(e2 & DESC_P_MASK))
2430 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2431
2432 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2433 /* to inner privilege */
2434 get_ss_esp_from_tss(&ss, &sp, dpl);
2435 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2436 ss, sp, param_count, ESP);
2437 if ((ss & 0xfffc) == 0)
2438 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2439 if ((ss & 3) != dpl)
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2442 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2443 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2444 if (ss_dpl != dpl)
2445 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2446 if (!(ss_e2 & DESC_S_MASK) ||
2447 (ss_e2 & DESC_CS_MASK) ||
2448 !(ss_e2 & DESC_W_MASK))
2449 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2450 if (!(ss_e2 & DESC_P_MASK))
2451 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2452
2453 // push_size = ((param_count * 2) + 8) << shift;
2454
2455 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2456 old_ssp = env->segs[R_SS].base;
2457
2458 sp_mask = get_sp_mask(ss_e2);
2459 ssp = get_seg_base(ss_e1, ss_e2);
2460 if (shift) {
2461 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2462 PUSHL(ssp, sp, sp_mask, ESP);
2463 for(i = param_count - 1; i >= 0; i--) {
2464 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2465 PUSHL(ssp, sp, sp_mask, val);
2466 }
2467 } else {
2468 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2469 PUSHW(ssp, sp, sp_mask, ESP);
2470 for(i = param_count - 1; i >= 0; i--) {
2471 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2472 PUSHW(ssp, sp, sp_mask, val);
2473 }
2474 }
2475 new_stack = 1;
2476 } else {
2477 /* to same privilege */
2478 sp = ESP;
2479 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2480 ssp = env->segs[R_SS].base;
2481 // push_size = (4 << shift);
2482 new_stack = 0;
2483 }
2484
2485 if (shift) {
2486 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2487 PUSHL(ssp, sp, sp_mask, next_eip);
2488 } else {
2489 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2490 PUSHW(ssp, sp, sp_mask, next_eip);
2491 }
2492
2493 /* from this point, not restartable */
2494
2495 if (new_stack) {
2496 ss = (ss & ~3) | dpl;
2497 cpu_x86_load_seg_cache(env, R_SS, ss,
2498 ssp,
2499 get_seg_limit(ss_e1, ss_e2),
2500 ss_e2);
2501 }
2502
2503 selector = (selector & ~3) | dpl;
2504 cpu_x86_load_seg_cache(env, R_CS, selector,
2505 get_seg_base(e1, e2),
2506 get_seg_limit(e1, e2),
2507 e2);
2508 cpu_x86_set_cpl(env, dpl);
2509 SET_ESP(sp, sp_mask);
2510 EIP = offset;
2511 }
2512 }
2513
2514 /* real and vm86 mode iret */
2515 void helper_iret_real(int shift)
2516 {
2517 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2518 target_ulong ssp;
2519 int eflags_mask;
2520
2521 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2522 sp = ESP;
2523 ssp = env->segs[R_SS].base;
2524 if (shift == 1) {
2525 /* 32 bits */
2526 POPL(ssp, sp, sp_mask, new_eip);
2527 POPL(ssp, sp, sp_mask, new_cs);
2528 new_cs &= 0xffff;
2529 POPL(ssp, sp, sp_mask, new_eflags);
2530 } else {
2531 /* 16 bits */
2532 POPW(ssp, sp, sp_mask, new_eip);
2533 POPW(ssp, sp, sp_mask, new_cs);
2534 POPW(ssp, sp, sp_mask, new_eflags);
2535 }
2536 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2537 env->segs[R_CS].selector = new_cs;
2538 env->segs[R_CS].base = (new_cs << 4);
2539 env->eip = new_eip;
2540 if (env->eflags & VM_MASK)
2541 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2542 else
2543 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2544 if (shift == 0)
2545 eflags_mask &= 0xffff;
2546 load_eflags(new_eflags, eflags_mask);
2547 env->hflags2 &= ~HF2_NMI_MASK;
2548 }
2549
2550 static inline void validate_seg(int seg_reg, int cpl)
2551 {
2552 int dpl;
2553 uint32_t e2;
2554
2555 /* XXX: on x86_64, we do not want to nullify FS and GS because
2556 they may still contain a valid base. I would be interested to
2557 know how a real x86_64 CPU behaves */
2558 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2559 (env->segs[seg_reg].selector & 0xfffc) == 0)
2560 return;
2561
2562 e2 = env->segs[seg_reg].flags;
2563 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2564 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2565 /* data or non conforming code segment */
2566 if (dpl < cpl) {
2567 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2568 }
2569 }
2570 }
2571
2572 /* protected mode iret */
2573 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2574 {
2575 uint32_t new_cs, new_eflags, new_ss;
2576 uint32_t new_es, new_ds, new_fs, new_gs;
2577 uint32_t e1, e2, ss_e1, ss_e2;
2578 int cpl, dpl, rpl, eflags_mask, iopl;
2579 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2580
2581 #ifdef TARGET_X86_64
2582 if (shift == 2)
2583 sp_mask = -1;
2584 else
2585 #endif
2586 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2587 sp = ESP;
2588 ssp = env->segs[R_SS].base;
2589 new_eflags = 0; /* avoid warning */
2590 #ifdef TARGET_X86_64
2591 if (shift == 2) {
2592 POPQ(sp, new_eip);
2593 POPQ(sp, new_cs);
2594 new_cs &= 0xffff;
2595 if (is_iret) {
2596 POPQ(sp, new_eflags);
2597 }
2598 } else
2599 #endif
2600 if (shift == 1) {
2601 /* 32 bits */
2602 POPL(ssp, sp, sp_mask, new_eip);
2603 POPL(ssp, sp, sp_mask, new_cs);
2604 new_cs &= 0xffff;
2605 if (is_iret) {
2606 POPL(ssp, sp, sp_mask, new_eflags);
2607 if (new_eflags & VM_MASK)
2608 goto return_to_vm86;
2609 }
2610 } else {
2611 /* 16 bits */
2612 POPW(ssp, sp, sp_mask, new_eip);
2613 POPW(ssp, sp, sp_mask, new_cs);
2614 if (is_iret)
2615 POPW(ssp, sp, sp_mask, new_eflags);
2616 }
2617 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2618 new_cs, new_eip, shift, addend);
2619 LOG_PCALL_STATE(env);
2620 if ((new_cs & 0xfffc) == 0)
2621 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2622 if (load_segment(&e1, &e2, new_cs) != 0)
2623 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2624 if (!(e2 & DESC_S_MASK) ||
2625 !(e2 & DESC_CS_MASK))
2626 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2627 cpl = env->hflags & HF_CPL_MASK;
2628 rpl = new_cs & 3;
2629 if (rpl < cpl)
2630 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2631 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2632 if (e2 & DESC_C_MASK) {
2633 if (dpl > rpl)
2634 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2635 } else {
2636 if (dpl != rpl)
2637 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2638 }
2639 if (!(e2 & DESC_P_MASK))
2640 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2641
2642 sp += addend;
2643 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2644 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2645 /* return to same privilege level */
2646 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2647 get_seg_base(e1, e2),
2648 get_seg_limit(e1, e2),
2649 e2);
2650 } else {
2651 /* return to different privilege level */
2652 #ifdef TARGET_X86_64
2653 if (shift == 2) {
2654 POPQ(sp, new_esp);
2655 POPQ(sp, new_ss);
2656 new_ss &= 0xffff;
2657 } else
2658 #endif
2659 if (shift == 1) {
2660 /* 32 bits */
2661 POPL(ssp, sp, sp_mask, new_esp);
2662 POPL(ssp, sp, sp_mask, new_ss);
2663 new_ss &= 0xffff;
2664 } else {
2665 /* 16 bits */
2666 POPW(ssp, sp, sp_mask, new_esp);
2667 POPW(ssp, sp, sp_mask, new_ss);
2668 }
2669 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2670 new_ss, new_esp);
2671 if ((new_ss & 0xfffc) == 0) {
2672 #ifdef TARGET_X86_64
2673 /* NULL ss is allowed in long mode if cpl != 3 */
2674 /* XXX: test CS64 ? */
2675 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2676 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2677 0, 0xffffffff,
2678 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2679 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2680 DESC_W_MASK | DESC_A_MASK);
2681 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2682 } else
2683 #endif
2684 {
2685 raise_exception_err(EXCP0D_GPF, 0);
2686 }
2687 } else {
2688 if ((new_ss & 3) != rpl)
2689 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2690 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2691 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2692 if (!(ss_e2 & DESC_S_MASK) ||
2693 (ss_e2 & DESC_CS_MASK) ||
2694 !(ss_e2 & DESC_W_MASK))
2695 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2696 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2697 if (dpl != rpl)
2698 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2699 if (!(ss_e2 & DESC_P_MASK))
2700 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2701 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2702 get_seg_base(ss_e1, ss_e2),
2703 get_seg_limit(ss_e1, ss_e2),
2704 ss_e2);
2705 }
2706
2707 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2708 get_seg_base(e1, e2),
2709 get_seg_limit(e1, e2),
2710 e2);
2711 cpu_x86_set_cpl(env, rpl);
2712 sp = new_esp;
2713 #ifdef TARGET_X86_64
2714 if (env->hflags & HF_CS64_MASK)
2715 sp_mask = -1;
2716 else
2717 #endif
2718 sp_mask = get_sp_mask(ss_e2);
2719
2720 /* validate data segments */
2721 validate_seg(R_ES, rpl);
2722 validate_seg(R_DS, rpl);
2723 validate_seg(R_FS, rpl);
2724 validate_seg(R_GS, rpl);
2725
2726 sp += addend;
2727 }
2728 SET_ESP(sp, sp_mask);
2729 env->eip = new_eip;
2730 if (is_iret) {
2731 /* NOTE: 'cpl' is the _old_ CPL */
2732 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2733 if (cpl == 0)
2734 eflags_mask |= IOPL_MASK;
2735 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2736 if (cpl <= iopl)
2737 eflags_mask |= IF_MASK;
2738 if (shift == 0)
2739 eflags_mask &= 0xffff;
2740 load_eflags(new_eflags, eflags_mask);
2741 }
2742 return;
2743
2744 return_to_vm86:
2745 POPL(ssp, sp, sp_mask, new_esp);
2746 POPL(ssp, sp, sp_mask, new_ss);
2747 POPL(ssp, sp, sp_mask, new_es);
2748 POPL(ssp, sp, sp_mask, new_ds);
2749 POPL(ssp, sp, sp_mask, new_fs);
2750 POPL(ssp, sp, sp_mask, new_gs);
2751
2752 /* modify processor state */
2753 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2754 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2755 load_seg_vm(R_CS, new_cs & 0xffff);
2756 cpu_x86_set_cpl(env, 3);
2757 load_seg_vm(R_SS, new_ss & 0xffff);
2758 load_seg_vm(R_ES, new_es & 0xffff);
2759 load_seg_vm(R_DS, new_ds & 0xffff);
2760 load_seg_vm(R_FS, new_fs & 0xffff);
2761 load_seg_vm(R_GS, new_gs & 0xffff);
2762
2763 env->eip = new_eip & 0xffff;
2764 ESP = new_esp;
2765 }
2766
2767 void helper_iret_protected(int shift, int next_eip)
2768 {
2769 int tss_selector, type;
2770 uint32_t e1, e2;
2771
2772 /* specific case for TSS */
2773 if (env->eflags & NT_MASK) {
2774 #ifdef TARGET_X86_64
2775 if (env->hflags & HF_LMA_MASK)
2776 raise_exception_err(EXCP0D_GPF, 0);
2777 #endif
2778 tss_selector = lduw_kernel(env->tr.base + 0);
2779 if (tss_selector & 4)
2780 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2781 if (load_segment(&e1, &e2, tss_selector) != 0)
2782 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2783 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2784 /* NOTE: we check both segment and busy TSS */
2785 if (type != 3)
2786 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2787 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2788 } else {
2789 helper_ret_protected(shift, 1, 0);
2790 }
2791 env->hflags2 &= ~HF2_NMI_MASK;
2792 }
2793
2794 void helper_lret_protected(int shift, int addend)
2795 {
2796 helper_ret_protected(shift, 0, addend);
2797 }
2798
2799 void helper_sysenter(void)
2800 {
2801 if (env->sysenter_cs == 0) {
2802 raise_exception_err(EXCP0D_GPF, 0);
2803 }
2804 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2805 cpu_x86_set_cpl(env, 0);
2806
2807 #ifdef TARGET_X86_64
2808 if (env->hflags & HF_LMA_MASK) {
2809 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2810 0, 0xffffffff,
2811 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2812 DESC_S_MASK |
2813 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2814 } else
2815 #endif
2816 {
2817 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2818 0, 0xffffffff,
2819 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2820 DESC_S_MASK |
2821 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2822 }
2823 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2824 0, 0xffffffff,
2825 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2826 DESC_S_MASK |
2827 DESC_W_MASK | DESC_A_MASK);
2828 ESP = env->sysenter_esp;
2829 EIP = env->sysenter_eip;
2830 }
2831
2832 void helper_sysexit(int dflag)
2833 {
2834 int cpl;
2835
2836 cpl = env->hflags & HF_CPL_MASK;
2837 if (env->sysenter_cs == 0 || cpl != 0) {
2838 raise_exception_err(EXCP0D_GPF, 0);
2839 }
2840 cpu_x86_set_cpl(env, 3);
2841 #ifdef TARGET_X86_64
2842 if (dflag == 2) {
2843 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2844 0, 0xffffffff,
2845 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2846 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2847 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2848 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2849 0, 0xffffffff,
2850 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2851 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2852 DESC_W_MASK | DESC_A_MASK);
2853 } else
2854 #endif
2855 {
2856 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2857 0, 0xffffffff,
2858 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2859 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2860 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2861 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2862 0, 0xffffffff,
2863 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2864 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2865 DESC_W_MASK | DESC_A_MASK);
2866 }
2867 ESP = ECX;
2868 EIP = EDX;
2869 }
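/* Editor's note (illustrative): SYSENTER/SYSEXIT derive all selectors from
 * MSR_IA32_SYSENTER_CS.  On entry CS = base and SS = base + 8; on exit the
 * 32-bit path above uses CS = base + 16 and SS = base + 24 with RPL 3, and
 * the 64-bit path (dflag == 2) uses base + 32 and base + 40.  The return
 * target comes from registers: EIP/RIP from EDX and ESP/RSP from ECX. */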
2870
2871 #if defined(CONFIG_USER_ONLY)
2872 target_ulong helper_read_crN(int reg)
2873 {
2874 return 0;
2875 }
2876
2877 void helper_write_crN(int reg, target_ulong t0)
2878 {
2879 }
2880
2881 void helper_movl_drN_T0(int reg, target_ulong t0)
2882 {
2883 }
2884 #else
2885 target_ulong helper_read_crN(int reg)
2886 {
2887 target_ulong val;
2888
2889 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2890 switch(reg) {
2891 default:
2892 val = env->cr[reg];
2893 break;
2894 case 8:
2895 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2896 val = cpu_get_apic_tpr(env->apic_state);
2897 } else {
2898 val = env->v_tpr;
2899 }
2900 break;
2901 }
2902 return val;
2903 }
2904
2905 void helper_write_crN(int reg, target_ulong t0)
2906 {
2907 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2908 switch(reg) {
2909 case 0:
2910 cpu_x86_update_cr0(env, t0);
2911 break;
2912 case 3:
2913 cpu_x86_update_cr3(env, t0);
2914 break;
2915 case 4:
2916 cpu_x86_update_cr4(env, t0);
2917 break;
2918 case 8:
2919 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2920 cpu_set_apic_tpr(env->apic_state, t0);
2921 }
2922 env->v_tpr = t0 & 0x0f;
2923 break;
2924 default:
2925 env->cr[reg] = t0;
2926 break;
2927 }
2928 }
2929
2930 void helper_movl_drN_T0(int reg, target_ulong t0)
2931 {
2932 int i;
2933
2934 if (reg < 4) {
2935 hw_breakpoint_remove(env, reg);
2936 env->dr[reg] = t0;
2937 hw_breakpoint_insert(env, reg);
2938 } else if (reg == 7) {
2939 for (i = 0; i < 4; i++)
2940 hw_breakpoint_remove(env, i);
2941 env->dr[7] = t0;
2942 for (i = 0; i < 4; i++)
2943 hw_breakpoint_insert(env, i);
2944 } else
2945 env->dr[reg] = t0;
2946 }
2947 #endif
2948
2949 void helper_lmsw(target_ulong t0)
2950 {
2951 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2952 if already set to one. */
2953 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2954 helper_write_crN(0, t0);
2955 }
2956
2957 void helper_clts(void)
2958 {
2959 env->cr[0] &= ~CR0_TS_MASK;
2960 env->hflags &= ~HF_TS_MASK;
2961 }
2962
2963 void helper_invlpg(target_ulong addr)
2964 {
2965 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2966 tlb_flush_page(env, addr);
2967 }
2968
2969 void helper_rdtsc(void)
2970 {
2971 uint64_t val;
2972
2973 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2974 raise_exception(EXCP0D_GPF);
2975 }
2976 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2977
2978 val = cpu_get_tsc(env) + env->tsc_offset;
2979 EAX = (uint32_t)(val);
2980 EDX = (uint32_t)(val >> 32);
2981 }
2982
2983 void helper_rdtscp(void)
2984 {
2985 helper_rdtsc();
2986 ECX = (uint32_t)(env->tsc_aux);
2987 }
2988
2989 void helper_rdpmc(void)
2990 {
2991 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2992 raise_exception(EXCP0D_GPF);
2993 }
2994 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2995
2996 /* currently unimplemented */
2997 raise_exception_err(EXCP06_ILLOP, 0);
2998 }
2999
3000 #if defined(CONFIG_USER_ONLY)
3001 void helper_wrmsr(void)
3002 {
3003 }
3004
3005 void helper_rdmsr(void)
3006 {
3007 }
3008 #else
3009 void helper_wrmsr(void)
3010 {
3011 uint64_t val;
3012
3013 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3014
3015 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3016
3017 switch((uint32_t)ECX) {
3018 case MSR_IA32_SYSENTER_CS:
3019 env->sysenter_cs = val & 0xffff;
3020 break;
3021 case MSR_IA32_SYSENTER_ESP:
3022 env->sysenter_esp = val;
3023 break;
3024 case MSR_IA32_SYSENTER_EIP:
3025 env->sysenter_eip = val;
3026 break;
3027 case MSR_IA32_APICBASE:
3028 cpu_set_apic_base(env->apic_state, val);
3029 break;
3030 case MSR_EFER:
3031 {
3032 uint64_t update_mask;
3033 update_mask = 0;
3034 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3035 update_mask |= MSR_EFER_SCE;
3036 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3037 update_mask |= MSR_EFER_LME;
3038 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3039 update_mask |= MSR_EFER_FFXSR;
3040 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3041 update_mask |= MSR_EFER_NXE;
3042 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3043 update_mask |= MSR_EFER_SVME;
3044 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3045 update_mask |= MSR_EFER_FFXSR;
3046 cpu_load_efer(env, (env->efer & ~update_mask) |
3047 (val & update_mask));
3048 }
3049 break;
3050 case MSR_STAR:
3051 env->star = val;
3052 break;
3053 case MSR_PAT:
3054 env->pat = val;
3055 break;
3056 case MSR_VM_HSAVE_PA:
3057 env->vm_hsave = val;
3058 break;
3059 #ifdef TARGET_X86_64
3060 case MSR_LSTAR:
3061 env->lstar = val;
3062 break;
3063 case MSR_CSTAR:
3064 env->cstar = val;
3065 break;
3066 case MSR_FMASK:
3067 env->fmask = val;
3068 break;
3069 case MSR_FSBASE:
3070 env->segs[R_FS].base = val;
3071 break;
3072 case MSR_GSBASE:
3073 env->segs[R_GS].base = val;
3074 break;
3075 case MSR_KERNELGSBASE:
3076 env->kernelgsbase = val;
3077 break;
3078 #endif
3079 case MSR_MTRRphysBase(0):
3080 case MSR_MTRRphysBase(1):
3081 case MSR_MTRRphysBase(2):
3082 case MSR_MTRRphysBase(3):
3083 case MSR_MTRRphysBase(4):
3084 case MSR_MTRRphysBase(5):
3085 case MSR_MTRRphysBase(6):
3086 case MSR_MTRRphysBase(7):
3087 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3088 break;
3089 case MSR_MTRRphysMask(0):
3090 case MSR_MTRRphysMask(1):
3091 case MSR_MTRRphysMask(2):
3092 case MSR_MTRRphysMask(3):
3093 case MSR_MTRRphysMask(4):
3094 case MSR_MTRRphysMask(5):
3095 case MSR_MTRRphysMask(6):
3096 case MSR_MTRRphysMask(7):
3097 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3098 break;
3099 case MSR_MTRRfix64K_00000:
3100 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3101 break;
3102 case MSR_MTRRfix16K_80000:
3103 case MSR_MTRRfix16K_A0000:
3104 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3105 break;
3106 case MSR_MTRRfix4K_C0000:
3107 case MSR_MTRRfix4K_C8000:
3108 case MSR_MTRRfix4K_D0000:
3109 case MSR_MTRRfix4K_D8000:
3110 case MSR_MTRRfix4K_E0000:
3111 case MSR_MTRRfix4K_E8000:
3112 case MSR_MTRRfix4K_F0000:
3113 case MSR_MTRRfix4K_F8000:
3114 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3115 break;
3116 case MSR_MTRRdefType:
3117 env->mtrr_deftype = val;
3118 break;
3119 case MSR_MCG_STATUS:
3120 env->mcg_status = val;
3121 break;
3122 case MSR_MCG_CTL:
3123 if ((env->mcg_cap & MCG_CTL_P)
3124 && (val == 0 || val == ~(uint64_t)0))
3125 env->mcg_ctl = val;
3126 break;
3127 case MSR_TSC_AUX:
3128 env->tsc_aux = val;
3129 break;
3130 default:
3131 if ((uint32_t)ECX >= MSR_MC0_CTL
3132 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3133 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3134 if ((offset & 0x3) != 0
3135 || (val == 0 || val == ~(uint64_t)0))
3136 env->mce_banks[offset] = val;
3137 break;
3138 }
3139 /* XXX: exception ? */
3140 break;
3141 }
3142 }
3143
3144 void helper_rdmsr(void)
3145 {
3146 uint64_t val;
3147
3148 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3149
3150 switch((uint32_t)ECX) {
3151 case MSR_IA32_SYSENTER_CS:
3152 val = env->sysenter_cs;
3153 break;
3154 case MSR_IA32_SYSENTER_ESP:
3155 val = env->sysenter_esp;
3156 break;
3157 case MSR_IA32_SYSENTER_EIP:
3158 val = env->sysenter_eip;
3159 break;
3160 case MSR_IA32_APICBASE:
3161 val = cpu_get_apic_base(env->apic_state);
3162 break;
3163 case MSR_EFER:
3164 val = env->efer;
3165 break;
3166 case MSR_STAR:
3167 val = env->star;
3168 break;
3169 case MSR_PAT:
3170 val = env->pat;
3171 break;
3172 case MSR_VM_HSAVE_PA:
3173 val = env->vm_hsave;
3174 break;
3175 case MSR_IA32_PERF_STATUS:
3176 /* tsc_increment_by_tick */
3177 val = 1000ULL;
3178 /* CPU multiplier */
3179 val |= (((uint64_t)4ULL) << 40);
3180 break;
3181 #ifdef TARGET_X86_64
3182 case MSR_LSTAR:
3183 val = env->lstar;
3184 break;
3185 case MSR_CSTAR:
3186 val = env->cstar;
3187 break;
3188 case MSR_FMASK:
3189 val = env->fmask;
3190 break;
3191 case MSR_FSBASE:
3192 val = env->segs[R_FS].base;
3193 break;
3194 case MSR_GSBASE:
3195 val = env->segs[R_GS].base;
3196 break;
3197 case MSR_KERNELGSBASE:
3198 val = env->kernelgsbase;
3199 break;
3200 case MSR_TSC_AUX:
3201 val = env->tsc_aux;
3202 break;
3203 #endif
3204 case MSR_MTRRphysBase(0):
3205 case MSR_MTRRphysBase(1):
3206 case MSR_MTRRphysBase(2):
3207 case MSR_MTRRphysBase(3):
3208 case MSR_MTRRphysBase(4):
3209 case MSR_MTRRphysBase(5):
3210 case MSR_MTRRphysBase(6):
3211 case MSR_MTRRphysBase(7):
3212 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3213 break;
3214 case MSR_MTRRphysMask(0):
3215 case MSR_MTRRphysMask(1):
3216 case MSR_MTRRphysMask(2):
3217 case MSR_MTRRphysMask(3):
3218 case MSR_MTRRphysMask(4):
3219 case MSR_MTRRphysMask(5):
3220 case MSR_MTRRphysMask(6):
3221 case MSR_MTRRphysMask(7):
3222 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3223 break;
3224 case MSR_MTRRfix64K_00000:
3225 val = env->mtrr_fixed[0];
3226 break;
3227 case MSR_MTRRfix16K_80000:
3228 case MSR_MTRRfix16K_A0000:
3229 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3230 break;
3231 case MSR_MTRRfix4K_C0000:
3232 case MSR_MTRRfix4K_C8000:
3233 case MSR_MTRRfix4K_D0000:
3234 case MSR_MTRRfix4K_D8000:
3235 case MSR_MTRRfix4K_E0000:
3236 case MSR_MTRRfix4K_E8000:
3237 case MSR_MTRRfix4K_F0000:
3238 case MSR_MTRRfix4K_F8000:
3239 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3240 break;
3241 case MSR_MTRRdefType:
3242 val = env->mtrr_deftype;
3243 break;
3244 case MSR_MTRRcap:
3245 if (env->cpuid_features & CPUID_MTRR)
3246 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3247 else
3248 /* XXX: exception ? */
3249 val = 0;
3250 break;
3251 case MSR_MCG_CAP:
3252 val = env->mcg_cap;
3253 break;
3254 case MSR_MCG_CTL:
3255 if (env->mcg_cap & MCG_CTL_P)
3256 val = env->mcg_ctl;
3257 else
3258 val = 0;
3259 break;
3260 case MSR_MCG_STATUS:
3261 val = env->mcg_status;
3262 break;
3263 default:
3264 if ((uint32_t)ECX >= MSR_MC0_CTL
3265 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3266 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3267 val = env->mce_banks[offset];
3268 break;
3269 }
3270 /* XXX: exception ? */
3271 val = 0;
3272 break;
3273 }
3274 EAX = (uint32_t)(val);
3275 EDX = (uint32_t)(val >> 32);
3276 }
3277 #endif
3278
3279 target_ulong helper_lsl(target_ulong selector1)
3280 {
3281 unsigned int limit;
3282 uint32_t e1, e2, eflags, selector;
3283 int rpl, dpl, cpl, type;
3284
3285 selector = selector1 & 0xffff;
3286 eflags = helper_cc_compute_all(CC_OP);
3287 if ((selector & 0xfffc) == 0)
3288 goto fail;
3289 if (load_segment(&e1, &e2, selector) != 0)
3290 goto fail;
3291 rpl = selector & 3;
3292 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3293 cpl = env->hflags & HF_CPL_MASK;
3294 if (e2 & DESC_S_MASK) {
3295 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3296 /* conforming */
3297 } else {
3298 if (dpl < cpl || dpl < rpl)
3299 goto fail;
3300 }
3301 } else {
3302 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3303 switch(type) {
3304 case 1:
3305 case 2:
3306 case 3:
3307 case 9:
3308 case 11:
3309 break;
3310 default:
3311 goto fail;
3312 }
3313 if (dpl < cpl || dpl < rpl) {
3314 fail:
3315 CC_SRC = eflags & ~CC_Z;
3316 return 0;
3317 }
3318 }
3319 limit = get_seg_limit(e1, e2);
3320 CC_SRC = eflags | CC_Z;
3321 return limit;
3322 }
3323
3324 target_ulong helper_lar(target_ulong selector1)
3325 {
3326 uint32_t e1, e2, eflags, selector;
3327 int rpl, dpl, cpl, type;
3328
3329 selector = selector1 & 0xffff;
3330 eflags = helper_cc_compute_all(CC_OP);
3331 if ((selector & 0xfffc) == 0)
3332 goto fail;
3333 if (load_segment(&e1, &e2, selector) != 0)
3334 goto fail;
3335 rpl = selector & 3;
3336 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3337 cpl = env->hflags & HF_CPL_MASK;
3338 if (e2 & DESC_S_MASK) {
3339 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3340 /* conforming */
3341 } else {
3342 if (dpl < cpl || dpl < rpl)
3343 goto fail;
3344 }
3345 } else {
3346 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3347 switch(type) {
3348 case 1:
3349 case 2:
3350 case 3:
3351 case 4:
3352 case 5:
3353 case 9:
3354 case 11:
3355 case 12:
3356 break;
3357 default:
3358 goto fail;
3359 }
3360 if (dpl < cpl || dpl < rpl) {
3361 fail:
3362 CC_SRC = eflags & ~CC_Z;
3363 return 0;
3364 }
3365 }
3366 CC_SRC = eflags | CC_Z;
3367 return e2 & 0x00f0ff00;
3368 }
3369
3370 void helper_verr(target_ulong selector1)
3371 {
3372 uint32_t e1, e2, eflags, selector;
3373 int rpl, dpl, cpl;
3374
3375 selector = selector1 & 0xffff;
3376 eflags = helper_cc_compute_all(CC_OP);
3377 if ((selector & 0xfffc) == 0)
3378 goto fail;
3379 if (load_segment(&e1, &e2, selector) != 0)
3380 goto fail;
3381 if (!(e2 & DESC_S_MASK))
3382 goto fail;
3383 rpl = selector & 3;
3384 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3385 cpl = env->hflags & HF_CPL_MASK;
3386 if (e2 & DESC_CS_MASK) {
3387 if (!(e2 & DESC_R_MASK))
3388 goto fail;
3389 if (!(e2 & DESC_C_MASK)) {
3390 if (dpl < cpl || dpl < rpl)
3391 goto fail;
3392 }
3393 } else {
3394 if (dpl < cpl || dpl < rpl) {
3395 fail:
3396 CC_SRC = eflags & ~CC_Z;
3397 return;
3398 }
3399 }
3400 CC_SRC = eflags | CC_Z;
3401 }
3402
3403 void helper_verw(target_ulong selector1)
3404 {
3405 uint32_t e1, e2, eflags, selector;
3406 int rpl, dpl, cpl;
3407
3408 selector = selector1 & 0xffff;
3409 eflags = helper_cc_compute_all(CC_OP);
3410 if ((selector & 0xfffc) == 0)
3411 goto fail;
3412 if (load_segment(&e1, &e2, selector) != 0)
3413 goto fail;
3414 if (!(e2 & DESC_S_MASK))
3415 goto fail;
3416 rpl = selector & 3;
3417 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3418 cpl = env->hflags & HF_CPL_MASK;
3419 if (e2 & DESC_CS_MASK) {
3420 goto fail;
3421 } else {
3422 if (dpl < cpl || dpl < rpl)
3423 goto fail;
3424 if (!(e2 & DESC_W_MASK)) {
3425 fail:
3426 CC_SRC = eflags & ~CC_Z;
3427 return;
3428 }
3429 }
3430 CC_SRC = eflags | CC_Z;
3431 }
3432
3433 /* x87 FPU helpers */
3434
3435 static inline double CPU86_LDouble_to_double(CPU86_LDouble a)
3436 {
3437 union {
3438 float64 f64;
3439 double d;
3440 } u;
3441
3442 u.f64 = floatx_to_float64(a, &env->fp_status);
3443 return u.d;
3444 }
3445
3446 static inline CPU86_LDouble double_to_CPU86_LDouble(double a)
3447 {
3448 union {
3449 float64 f64;
3450 double d;
3451 } u;
3452
3453 u.d = a;
3454 return float64_to_floatx(u.f64, &env->fp_status);
3455 }
3456
3457 static void fpu_set_exception(int mask)
3458 {
3459 env->fpus |= mask;
3460 if (env->fpus & (~env->fpuc & FPUC_EM))
3461 env->fpus |= FPUS_SE | FPUS_B;
3462 }
3463
3464 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3465 {
3466 if (floatx_is_zero(b)) {
3467 fpu_set_exception(FPUS_ZE);
3468 }
3469 return floatx_div(a, b, &env->fp_status);
3470 }
3471
3472 static void fpu_raise_exception(void)
3473 {
3474 if (env->cr[0] & CR0_NE_MASK) {
3475 raise_exception(EXCP10_COPR);
3476 }
3477 #if !defined(CONFIG_USER_ONLY)
3478 else {
3479 cpu_set_ferr(env);
3480 }
3481 #endif
3482 }
3483
3484 void helper_flds_FT0(uint32_t val)
3485 {
3486 union {
3487 float32 f;
3488 uint32_t i;
3489 } u;
3490 u.i = val;
3491 FT0 = float32_to_floatx(u.f, &env->fp_status);
3492 }
3493
3494 void helper_fldl_FT0(uint64_t val)
3495 {
3496 union {
3497 float64 f;
3498 uint64_t i;
3499 } u;
3500 u.i = val;
3501 FT0 = float64_to_floatx(u.f, &env->fp_status);
3502 }
3503
3504 void helper_fildl_FT0(int32_t val)
3505 {
3506 FT0 = int32_to_floatx(val, &env->fp_status);
3507 }
3508
3509 void helper_flds_ST0(uint32_t val)
3510 {
3511 int new_fpstt;
3512 union {
3513 float32 f;
3514 uint32_t i;
3515 } u;
3516 new_fpstt = (env->fpstt - 1) & 7;
3517 u.i = val;
3518 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3519 env->fpstt = new_fpstt;
3520 env->fptags[new_fpstt] = 0; /* validate stack entry */
3521 }
3522
3523 void helper_fldl_ST0(uint64_t val)
3524 {
3525 int new_fpstt;
3526 union {
3527 float64 f;
3528 uint64_t i;
3529 } u;
3530 new_fpstt = (env->fpstt - 1) & 7;
3531 u.i = val;
3532 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3533 env->fpstt = new_fpstt;
3534 env->fptags[new_fpstt] = 0; /* validate stack entry */
3535 }
3536
3537 void helper_fildl_ST0(int32_t val)
3538 {
3539 int new_fpstt;
3540 new_fpstt = (env->fpstt - 1) & 7;
3541 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3542 env->fpstt = new_fpstt;
3543 env->fptags[new_fpstt] = 0; /* validate stack entry */
3544 }
3545
3546 void helper_fildll_ST0(int64_t val)
3547 {
3548 int new_fpstt;
3549 new_fpstt = (env->fpstt - 1) & 7;
3550 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3551 env->fpstt = new_fpstt;
3552 env->fptags[new_fpstt] = 0; /* validate stack entry */
3553 }
3554
3555 uint32_t helper_fsts_ST0(void)
3556 {
3557 union {
3558 float32 f;
3559 uint32_t i;
3560 } u;
3561 u.f = floatx_to_float32(ST0, &env->fp_status);
3562 return u.i;
3563 }
3564
3565 uint64_t helper_fstl_ST0(void)
3566 {
3567 union {
3568 float64 f;
3569 uint64_t i;
3570 } u;
3571 u.f = floatx_to_float64(ST0, &env->fp_status);
3572 return u.i;
3573 }
3574
3575 int32_t helper_fist_ST0(void)
3576 {
3577 int32_t val;
3578 val = floatx_to_int32(ST0, &env->fp_status);
3579 if (val != (int16_t)val)
3580 val = -32768;
3581 return val;
3582 }
3583
3584 int32_t helper_fistl_ST0(void)
3585 {
3586 int32_t val;
3587 val = floatx_to_int32(ST0, &env->fp_status);
3588 return val;
3589 }
3590
3591 int64_t helper_fistll_ST0(void)
3592 {
3593 int64_t val;
3594 val = floatx_to_int64(ST0, &env->fp_status);
3595 return val;
3596 }
3597
3598 int32_t helper_fistt_ST0(void)
3599 {
3600 int32_t val;
3601 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3602 if (val != (int16_t)val)
3603 val = -32768;
3604 return val;
3605 }
3606
3607 int32_t helper_fisttl_ST0(void)
3608 {
3609 int32_t val;
3610 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3611 return val;
3612 }
3613
3614 int64_t helper_fisttll_ST0(void)
3615 {
3616 int64_t val;
3617 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3618 return val;
3619 }
3620
3621 void helper_fldt_ST0(target_ulong ptr)
3622 {
3623 int new_fpstt;
3624 new_fpstt = (env->fpstt - 1) & 7;
3625 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3626 env->fpstt = new_fpstt;
3627 env->fptags[new_fpstt] = 0; /* validate stack entry */
3628 }
3629
3630 void helper_fstt_ST0(target_ulong ptr)
3631 {
3632 helper_fstt(ST0, ptr);
3633 }
3634
3635 void helper_fpush(void)
3636 {
3637 fpush();
3638 }
3639
3640 void helper_fpop(void)
3641 {
3642 fpop();
3643 }
3644
3645 void helper_fdecstp(void)
3646 {
3647 env->fpstt = (env->fpstt - 1) & 7;
3648 env->fpus &= (~0x4700);
3649 }
3650
3651 void helper_fincstp(void)
3652 {
3653 env->fpstt = (env->fpstt + 1) & 7;
3654 env->fpus &= (~0x4700);
3655 }
3656
3657 /* FPU move */
3658
3659 void helper_ffree_STN(int st_index)
3660 {
3661 env->fptags[(env->fpstt + st_index) & 7] = 1;
3662 }
3663
3664 void helper_fmov_ST0_FT0(void)
3665 {
3666 ST0 = FT0;
3667 }
3668
3669 void helper_fmov_FT0_STN(int st_index)
3670 {
3671 FT0 = ST(st_index);
3672 }
3673
3674 void helper_fmov_ST0_STN(int st_index)
3675 {
3676 ST0 = ST(st_index);
3677 }
3678
3679 void helper_fmov_STN_ST0(int st_index)
3680 {
3681 ST(st_index) = ST0;
3682 }
3683
3684 void helper_fxchg_ST0_STN(int st_index)
3685 {
3686 CPU86_LDouble tmp;
3687 tmp = ST(st_index);
3688 ST(st_index) = ST0;
3689 ST0 = tmp;
3690 }
3691
3692 /* FPU operations */
3693
3694 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3695
3696 void helper_fcom_ST0_FT0(void)
3697 {
3698 int ret;
3699
3700 ret = floatx_compare(ST0, FT0, &env->fp_status);
3701 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3702 }
3703
3704 void helper_fucom_ST0_FT0(void)
3705 {
3706 int ret;
3707
3708 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3709 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3710 }
3711
3712 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3713
3714 void helper_fcomi_ST0_FT0(void)
3715 {
3716 int eflags;
3717 int ret;
3718
3719 ret = floatx_compare(ST0, FT0, &env->fp_status);
3720 eflags = helper_cc_compute_all(CC_OP);
3721 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3722 CC_SRC = eflags;
3723 }
3724
3725 void helper_fucomi_ST0_FT0(void)
3726 {
3727 int eflags;
3728 int ret;
3729
3730 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3731 eflags = helper_cc_compute_all(CC_OP);
3732 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3733 CC_SRC = eflags;
3734 }
3735
3736 void helper_fadd_ST0_FT0(void)
3737 {
3738 ST0 = floatx_add(ST0, FT0, &env->fp_status);
3739 }
3740
3741 void helper_fmul_ST0_FT0(void)
3742 {
3743 ST0 = floatx_mul(ST0, FT0, &env->fp_status);
3744 }
3745
3746 void helper_fsub_ST0_FT0(void)
3747 {
3748 ST0 = floatx_sub(ST0, FT0, &env->fp_status);
3749 }
3750
3751 void helper_fsubr_ST0_FT0(void)
3752 {
3753 ST0 = floatx_sub(FT0, ST0, &env->fp_status);
3754 }
3755
3756 void helper_fdiv_ST0_FT0(void)
3757 {
3758 ST0 = helper_fdiv(ST0, FT0);
3759 }
3760
3761 void helper_fdivr_ST0_FT0(void)
3762 {
3763 ST0 = helper_fdiv(FT0, ST0);
3764 }
3765
3766 /* fp operations between STN and ST0 */
3767
3768 void helper_fadd_STN_ST0(int st_index)
3769 {
3770 ST(st_index) = floatx_add(ST(st_index), ST0, &env->fp_status);
3771 }
3772
3773 void helper_fmul_STN_ST0(int st_index)
3774 {
3775 ST(st_index) = floatx_mul(ST(st_index), ST0, &env->fp_status);
3776 }
3777
3778 void helper_fsub_STN_ST0(int st_index)
3779 {
3780 ST(st_index) = floatx_sub(ST(st_index), ST0, &env->fp_status);
3781 }
3782
3783 void helper_fsubr_STN_ST0(int st_index)
3784 {
3785 ST(st_index) = floatx_sub(ST0, ST(st_index), &env->fp_status);
3786 }
3787
3788 void helper_fdiv_STN_ST0(int st_index)
3789 {
3790 CPU86_LDouble *p;
3791 p = &ST(st_index);
3792 *p = helper_fdiv(*p, ST0);
3793 }
3794
3795 void helper_fdivr_STN_ST0(int st_index)
3796 {
3797 CPU86_LDouble *p;
3798 p = &ST(st_index);
3799 *p = helper_fdiv(ST0, *p);
3800 }
3801
3802 /* misc FPU operations */
3803 void helper_fchs_ST0(void)
3804 {
3805 ST0 = floatx_chs(ST0);
3806 }
3807
3808 void helper_fabs_ST0(void)
3809 {
3810 ST0 = floatx_abs(ST0);
3811 }
3812
3813 void helper_fld1_ST0(void)
3814 {
3815 ST0 = f15rk[1];
3816 }
3817
3818 void helper_fldl2t_ST0(void)
3819 {
3820 ST0 = f15rk[6];
3821 }
3822
3823 void helper_fldl2e_ST0(void)
3824 {
3825 ST0 = f15rk[5];
3826 }
3827
3828 void helper_fldpi_ST0(void)
3829 {
3830 ST0 = f15rk[2];
3831 }
3832
3833 void helper_fldlg2_ST0(void)
3834 {
3835 ST0 = f15rk[3];
3836 }
3837
3838 void helper_fldln2_ST0(void)
3839 {
3840 ST0 = f15rk[4];
3841 }
3842
3843 void helper_fldz_ST0(void)
3844 {
3845 ST0 = f15rk[0];
3846 }
3847
3848 void helper_fldz_FT0(void)
3849 {
3850 FT0 = f15rk[0];
3851 }
3852
3853 uint32_t helper_fnstsw(void)
3854 {
3855 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3856 }
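/* Editor's note (illustrative): the x87 status word keeps the top-of-stack
 * index in bits 13:11, so FNSTSW merges env->fpstt into that field while the
 * remaining bits (condition codes, exception flags, busy) come straight from
 * env->fpus.  0x3800 is exactly the TOP mask. */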
3857
3858 uint32_t helper_fnstcw(void)
3859 {
3860 return env->fpuc;
3861 }
3862
3863 static void update_fp_status(void)
3864 {
3865 int rnd_type;
3866
3867 /* set rounding mode */
3868 switch(env->fpuc & RC_MASK) {
3869 default:
3870 case RC_NEAR:
3871 rnd_type = float_round_nearest_even;
3872 break;
3873 case RC_DOWN:
3874 rnd_type = float_round_down;
3875 break;
3876 case RC_UP:
3877 rnd_type = float_round_up;
3878 break;
3879 case RC_CHOP:
3880 rnd_type = float_round_to_zero;
3881 break;
3882 }
3883 set_float_rounding_mode(rnd_type, &env->fp_status);
3884 #ifdef FLOATX80
3885 switch((env->fpuc >> 8) & 3) {
3886 case 0:
3887 rnd_type = 32;
3888 break;
3889 case 2:
3890 rnd_type = 64;
3891 break;
3892 case 3:
3893 default:
3894 rnd_type = 80;
3895 break;
3896 }
3897 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3898 #endif
3899 }
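/* Editor's note (illustrative): the mapping above follows the x87 control
 * word.  RC (bits 11:10 of FPUC): 0 = round to nearest-even, 1 = down,
 * 2 = up, 3 = toward zero.  PC (bits 9:8): 0 = single (24-bit significand),
 * 2 = double (53-bit), 3 = extended (64-bit); the 32/64/80 values are the
 * encodings expected by set_floatx80_rounding_precision(). */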
3900
3901 void helper_fldcw(uint32_t val)
3902 {
3903 env->fpuc = val;
3904 update_fp_status();
3905 }
3906
3907 void helper_fclex(void)
3908 {
3909 env->fpus &= 0x7f00;
3910 }
3911
3912 void helper_fwait(void)
3913 {
3914 if (env->fpus & FPUS_SE)
3915 fpu_raise_exception();
3916 }
3917
3918 void helper_fninit(void)
3919 {
3920 env->fpus = 0;
3921 env->fpstt = 0;
3922 env->fpuc = 0x37f;
3923 env->fptags[0] = 1;
3924 env->fptags[1] = 1;
3925 env->fptags[2] = 1;
3926 env->fptags[3] = 1;
3927 env->fptags[4] = 1;
3928 env->fptags[5] = 1;
3929 env->fptags[6] = 1;
3930 env->fptags[7] = 1;
3931 }
3932
3933 /* BCD ops */
3934
3935 void helper_fbld_ST0(target_ulong ptr)
3936 {
3937 CPU86_LDouble tmp;
3938 uint64_t val;
3939 unsigned int v;
3940 int i;
3941
3942 val = 0;
3943 for(i = 8; i >= 0; i--) {
3944 v = ldub(ptr + i);
3945 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3946 }
3947 tmp = int64_to_floatx(val, &env->fp_status);
3948 if (ldub(ptr + 9) & 0x80) {
3949 tmp = floatx_chs(tmp);
3950 }
3951 fpush();
3952 ST0 = tmp;
3953 }
3954
3955 void helper_fbst_ST0(target_ulong ptr)
3956 {
3957 int v;
3958 target_ulong mem_ref, mem_end;
3959 int64_t val;
3960
3961 val = floatx_to_int64(ST0, &env->fp_status);
3962 mem_ref = ptr;
3963 mem_end = mem_ref + 9;
3964 if (val < 0) {
3965 stb(mem_end, 0x80);
3966 val = -val;
3967 } else {
3968 stb(mem_end, 0x00);
3969 }
3970 while (mem_ref < mem_end) {
3971 if (val == 0)
3972 break;
3973 v = val % 100;
3974 val = val / 100;
3975 v = ((v / 10) << 4) | (v % 10);
3976 stb(mem_ref++, v);
3977 }
3978 while (mem_ref < mem_end) {
3979 stb(mem_ref++, 0);
3980 }
3981 }
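/* Editor's note (illustrative): FBLD/FBSTP use the 80-bit packed-BCD format:
 * bytes 0-8 hold 18 decimal digits, two per byte with the less significant
 * digit in the low nibble and byte 0 least significant; bit 7 of byte 9 is
 * the sign.  E.g. storing -123 writes 0x23, 0x01, seven zero bytes, then
 * 0x80. */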
3982
3983 void helper_f2xm1(void)
3984 {
3985 double val = CPU86_LDouble_to_double(ST0);
3986 val = pow(2.0, val) - 1.0;
3987 ST0 = double_to_CPU86_LDouble(val);
3988 }
3989
3990 void helper_fyl2x(void)
3991 {
3992 double fptemp = CPU86_LDouble_to_double(ST0);
3993
3994 if (fptemp>0.0){
3995 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3996 fptemp *= CPU86_LDouble_to_double(ST1);
3997 ST1 = double_to_CPU86_LDouble(fptemp);
3998 fpop();
3999 } else {
4000 env->fpus &= (~0x4700);
4001 env->fpus |= 0x400;
4002 }
4003 }
4004
4005 void helper_fptan(void)
4006 {
4007 double fptemp = CPU86_LDouble_to_double(ST0);
4008
4009 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4010 env->fpus |= 0x400;
4011 } else {
4012 fptemp = tan(fptemp);
4013 ST0 = double_to_CPU86_LDouble(fptemp);
4014 fpush();
4015 ST0 = floatx_one;
4016 env->fpus &= (~0x400); /* C2 <-- 0 */
4017 /* the above code is for |arg| < 2**52 only */
4018 }
4019 }
4020
4021 void helper_fpatan(void)
4022 {
4023 double fptemp, fpsrcop;
4024
4025 fpsrcop = CPU86_LDouble_to_double(ST1);
4026 fptemp = CPU86_LDouble_to_double(ST0);
4027 ST1 = double_to_CPU86_LDouble(atan2(fpsrcop, fptemp));
4028 fpop();
4029 }
4030
4031 void helper_fxtract(void)
4032 {
4033 CPU86_LDoubleU temp;
4034
4035 temp.d = ST0;
4036
4037 if (floatx_is_zero(ST0)) {
4038 /* Easy way to generate -inf and raising division by 0 exception */
4039 ST0 = floatx_div(floatx_chs(floatx_one), floatx_zero, &env->fp_status);
4040 fpush();
4041 ST0 = temp.d;
4042 } else {
4043 int expdif;
4044
4045 expdif = EXPD(temp) - EXPBIAS;
4046 /*DP exponent bias*/
4047 ST0 = int32_to_floatx(expdif, &env->fp_status);
4048 fpush();
4049 BIASEXPONENT(temp);
4050 ST0 = temp.d;
4051 }
4052 }
4053
4054 void helper_fprem1(void)
4055 {
4056 CPU86_LDouble dblq, fpsrcop, fptemp;
4057 CPU86_LDoubleU fpsrcop1, fptemp1;
4058 int expdif;
4059 signed long long int q;
4060
4061 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4062 ST0 = 0.0 / 0.0; /* NaN */
4063 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4064 return;
4065 }
4066
4067 fpsrcop = ST0;
4068 fptemp = ST1;
4069 fpsrcop1.d = fpsrcop;
4070 fptemp1.d = fptemp;
4071 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4072
4073 if (expdif < 0) {
4074 /* optimisation? taken from the AMD docs */
4075 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4076 /* ST0 is unchanged */
4077 return;
4078 }
4079
4080 if (expdif < 53) {
4081 dblq = fpsrcop / fptemp;
4082 /* round dblq towards nearest integer */
4083 dblq = rint(dblq);
4084 ST0 = fpsrcop - fptemp * dblq;
4085
4086 /* convert dblq to q by truncating towards zero */
4087 if (dblq < 0.0)
4088 q = (signed long long int)(-dblq);
4089 else
4090 q = (signed long long int)dblq;
4091
4092 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4093 /* (C0,C3,C1) <-- (q2,q1,q0) */
4094 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4095 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4096 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4097 } else {
4098 env->fpus |= 0x400; /* C2 <-- 1 */
4099 fptemp = pow(2.0, expdif - 50);
4100 fpsrcop = (ST0 / ST1) / fptemp;
4101 /* fpsrcop = integer obtained by chopping */
4102 fpsrcop = (fpsrcop < 0.0) ?
4103 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4104 ST0 -= (ST1 * fpsrcop * fptemp);
4105 }
4106 }
4107
4108 void helper_fprem(void)
4109 {
4110 CPU86_LDouble dblq, fpsrcop, fptemp;
4111 CPU86_LDoubleU fpsrcop1, fptemp1;
4112 int expdif;
4113 signed long long int q;
4114
4115 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4116 ST0 = 0.0 / 0.0; /* NaN */
4117 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4118 return;
4119 }
4120
4121 fpsrcop = (CPU86_LDouble)ST0;
4122 fptemp = (CPU86_LDouble)ST1;
4123 fpsrcop1.d = fpsrcop;
4124 fptemp1.d = fptemp;
4125 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4126
4127 if (expdif < 0) {
4128 /* optimisation? taken from the AMD docs */
4129 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4130 /* ST0 is unchanged */
4131 return;
4132 }
4133
4134 if ( expdif < 53 ) {
4135 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4136 /* round dblq towards zero */
4137 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4138 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4139
4140 /* convert dblq to q by truncating towards zero */
4141 if (dblq < 0.0)
4142 q = (signed long long int)(-dblq);
4143 else
4144 q = (signed long long int)dblq;
4145
4146 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4147 /* (C0,C3,C1) <-- (q2,q1,q0) */
4148 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4149 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4150 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4151 } else {
4152 int N = 32 + (expdif % 32); /* as per AMD docs */
4153 env->fpus |= 0x400; /* C2 <-- 1 */
4154 fptemp = pow(2.0, (double)(expdif - N));
4155 fpsrcop = (ST0 / ST1) / fptemp;
4156 /* fpsrcop = integer obtained by chopping */
4157 fpsrcop = (fpsrcop < 0.0) ?
4158 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4159 ST0 -= (ST1 * fpsrcop * fptemp);
4160 }
4161 }
4162
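/* Illustrative sketch (not part of the original helpers): the quotient-bit
   to condition-code mapping shared by helper_fprem1 and helper_fprem above.
   The three low bits of the quotient q land in C0 (bit 8), C3 (bit 14) and
   C1 (bit 9) of the FPU status word; C2 (bit 10) stays clear once the
   reduction is complete. The name is hypothetical. */
#if 0
static unsigned int fprem_cc_example(unsigned long long q)
{
    unsigned int fpus = 0;

    fpus |= (q & 0x4) << (8 - 2);    /* C0 <-- q2 */
    fpus |= (q & 0x2) << (14 - 1);   /* C3 <-- q1 */
    fpus |= (q & 0x1) << (9 - 0);    /* C1 <-- q0 */
    return fpus;                     /* e.g. q = 5 (101b) sets C0 and C1 */
}
#endif
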
4163 void helper_fyl2xp1(void)
4164 {
4165 double fptemp = CPU86_LDouble_to_double(ST0);
4166
4167 if ((fptemp+1.0)>0.0) {
4168 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4169 fptemp *= CPU86_LDouble_to_double(ST1);
4170 ST1 = double_to_CPU86_LDouble(fptemp);
4171 fpop();
4172 } else {
4173 env->fpus &= (~0x4700);
4174 env->fpus |= 0x400;
4175 }
4176 }
4177
4178 void helper_fsqrt(void)
4179 {
4180 if (floatx_is_neg(ST0)) {
4181 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4182 env->fpus |= 0x400;
4183 }
4184 ST0 = floatx_sqrt(ST0, &env->fp_status);
4185 }
4186
4187 void helper_fsincos(void)
4188 {
4189 double fptemp = CPU86_LDouble_to_double(ST0);
4190
4191 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4192 env->fpus |= 0x400;
4193 } else {
4194 ST0 = double_to_CPU86_LDouble(sin(fptemp));
4195 fpush();
4196 ST0 = double_to_CPU86_LDouble(cos(fptemp));
4197 env->fpus &= (~0x400); /* C2 <-- 0 */
4198 /* the above code is for |arg| < 2**63 only */
4199 }
4200 }
4201
4202 void helper_frndint(void)
4203 {
4204 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4205 }
4206
4207 void helper_fscale(void)
4208 {
4209 if (floatx_is_any_nan(ST1)) {
4210 ST0 = ST1;
4211 } else {
4212 int n = floatx_to_int32_round_to_zero(ST1, &env->fp_status);
4213 ST0 = floatx_scalbn(ST0, n, &env->fp_status);
4214 }
4215 }
4216
4217 void helper_fsin(void)
4218 {
4219 double fptemp = CPU86_LDouble_to_double(ST0);
4220
4221 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4222 env->fpus |= 0x400;
4223 } else {
4224 ST0 = double_to_CPU86_LDouble(sin(fptemp));
4225 env->fpus &= (~0x400); /* C2 <-- 0 */
4226 /* the above code is for |arg| < 2**53 only */
4227 }
4228 }
4229
4230 void helper_fcos(void)
4231 {
4232 double fptemp = CPU86_LDouble_to_double(ST0);
4233
4234 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4235 env->fpus |= 0x400;
4236 } else {
4237 ST0 = double_to_CPU86_LDouble(cos(fptemp));
4238 env->fpus &= (~0x400); /* C2 <-- 0 */
4239 /* the above code is for |arg| < 2**63 only */
4240 }
4241 }
4242
4243 void helper_fxam_ST0(void)
4244 {
4245 CPU86_LDoubleU temp;
4246 int expdif;
4247
4248 temp.d = ST0;
4249
4250 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4251 if (SIGND(temp))
4252 env->fpus |= 0x200; /* C1 <-- 1 */
4253
4254 /* XXX: test fptags too */
4255 expdif = EXPD(temp);
4256 if (expdif == MAXEXPD) {
4257 #ifdef USE_X86LDOUBLE
4258 if (MANTD(temp) == 0x8000000000000000ULL)
4259 #else
4260 if (MANTD(temp) == 0)
4261 #endif
4262 env->fpus |= 0x500 /*Infinity*/;
4263 else
4264 env->fpus |= 0x100 /*NaN*/;
4265 } else if (expdif == 0) {
4266 if (MANTD(temp) == 0)
4267 env->fpus |= 0x4000 /*Zero*/;
4268 else
4269 env->fpus |= 0x4400 /*Denormal*/;
4270 } else {
4271 env->fpus |= 0x400;
4272 }
4273 }
4274
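/* Summary of the FXAM encodings set above (added for clarity, derived from
   the code): C0 is fpus bit 8, C1 bit 9, C2 bit 10, C3 bit 14.
 *
 *   class      fpus bits   C3 C2 C0
 *   NaN        0x100        0  0  1
 *   normal     0x400        0  1  0
 *   infinity   0x500        0  1  1
 *   zero       0x4000       1  0  0
 *   denormal   0x4400       1  1  0
 *
 * C1 (0x200) additionally reports the sign of ST0. */
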
4275 void helper_fstenv(target_ulong ptr, int data32)
4276 {
4277 int fpus, fptag, exp, i;
4278 uint64_t mant;
4279 CPU86_LDoubleU tmp;
4280
4281 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4282 fptag = 0;
4283 for (i=7; i>=0; i--) {
4284 fptag <<= 2;
4285 if (env->fptags[i]) {
4286 fptag |= 3;
4287 } else {
4288 tmp.d = env->fpregs[i].d;
4289 exp = EXPD(tmp);
4290 mant = MANTD(tmp);
4291 if (exp == 0 && mant == 0) {
4292 /* zero */
4293 fptag |= 1;
4294 } else if (exp == 0 || exp == MAXEXPD
4295 #ifdef USE_X86LDOUBLE
4296 || (mant & (1LL << 63)) == 0
4297 #endif
4298 ) {
4299 /* NaNs, infinity, denormal */
4300 fptag |= 2;
4301 }
4302 }
4303 }
4304 if (data32) {
4305 /* 32 bit */
4306 stl(ptr, env->fpuc);
4307 stl(ptr + 4, fpus);
4308 stl(ptr + 8, fptag);
4309 stl(ptr + 12, 0); /* fpip */
4310 stl(ptr + 16, 0); /* fpcs */
4311 stl(ptr + 20, 0); /* fpoo */
4312 stl(ptr + 24, 0); /* fpos */
4313 } else {
4314 /* 16 bit */
4315 stw(ptr, env->fpuc);
4316 stw(ptr + 2, fpus);
4317 stw(ptr + 4, fptag);
4318 stw(ptr + 6, 0);
4319 stw(ptr + 8, 0);
4320 stw(ptr + 10, 0);
4321 stw(ptr + 12, 0);
4322 }
4323 }
4324
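/* Illustrative sketch (not part of the original helper): the 16-bit tag word
   built above packs two bits per physical register (00 valid, 01 zero,
   11 empty; 10 marks specials such as NaN, infinity or denormal). With only
   fpregs[0] holding a nonzero finite value and every other slot empty the
   result is 0xfffc. Names are hypothetical; the special class is omitted
   for brevity. */
#if 0
static int tag_word_example(const int empty[8], const int zero[8])
{
    int i, fptag = 0;

    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (empty[i])
            fptag |= 3;          /* 11: empty */
        else if (zero[i])
            fptag |= 1;          /* 01: zero */
        /* 00: valid */
    }
    return fptag;
}
#endif
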
4325 void helper_fldenv(target_ulong ptr, int data32)
4326 {
4327 int i, fpus, fptag;
4328
4329 if (data32) {
4330 env->fpuc = lduw(ptr);
4331 fpus = lduw(ptr + 4);
4332 fptag = lduw(ptr + 8);
4333 }
4334 else {
4335 env->fpuc = lduw(ptr);
4336 fpus = lduw(ptr + 2);
4337 fptag = lduw(ptr + 4);
4338 }
4339 env->fpstt = (fpus >> 11) & 7;
4340 env->fpus = fpus & ~0x3800;
4341 for(i = 0;i < 8; i++) {
4342 env->fptags[i] = ((fptag & 3) == 3);
4343 fptag >>= 2;
4344 }
4345 }
4346
4347 void helper_fsave(target_ulong ptr, int data32)
4348 {
4349 CPU86_LDouble tmp;
4350 int i;
4351
4352 helper_fstenv(ptr, data32);
4353
4354 ptr += (14 << data32);
4355 for(i = 0;i < 8; i++) {
4356 tmp = ST(i);
4357 helper_fstt(tmp, ptr);
4358 ptr += 10;
4359 }
4360
4361 /* fninit */
4362 env->fpus = 0;
4363 env->fpstt = 0;
4364 env->fpuc = 0x37f;
4365 env->fptags[0] = 1;
4366 env->fptags[1] = 1;
4367 env->fptags[2] = 1;
4368 env->fptags[3] = 1;
4369 env->fptags[4] = 1;
4370 env->fptags[5] = 1;
4371 env->fptags[6] = 1;
4372 env->fptags[7] = 1;
4373 }
4374
4375 void helper_frstor(target_ulong ptr, int data32)
4376 {
4377 CPU86_LDouble tmp;
4378 int i;
4379
4380 helper_fldenv(ptr, data32);
4381 ptr += (14 << data32);
4382
4383 for(i = 0;i < 8; i++) {
4384 tmp = helper_fldt(ptr);
4385 ST(i) = tmp;
4386 ptr += 10;
4387 }
4388 }
4389
4390 void helper_fxsave(target_ulong ptr, int data64)
4391 {
4392 int fpus, fptag, i, nb_xmm_regs;
4393 CPU86_LDouble tmp;
4394 target_ulong addr;
4395
4396 /* The operand must be 16 byte aligned */
4397 if (ptr & 0xf) {
4398 raise_exception(EXCP0D_GPF);
4399 }
4400
4401 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4402 fptag = 0;
4403 for(i = 0; i < 8; i++) {
4404 fptag |= (env->fptags[i] << i);
4405 }
4406 stw(ptr, env->fpuc);
4407 stw(ptr + 2, fpus);
4408 stw(ptr + 4, fptag ^ 0xff);
4409 #ifdef TARGET_X86_64
4410 if (data64) {
4411 stq(ptr + 0x08, 0); /* rip */
4412 stq(ptr + 0x10, 0); /* rdp */
4413 } else
4414 #endif
4415 {
4416 stl(ptr + 0x08, 0); /* eip */
4417 stl(ptr + 0x0c, 0); /* sel */
4418 stl(ptr + 0x10, 0); /* dp */
4419 stl(ptr + 0x14, 0); /* sel */
4420 }
4421
4422 addr = ptr + 0x20;
4423 for(i = 0;i < 8; i++) {
4424 tmp = ST(i);
4425 helper_fstt(tmp, addr);
4426 addr += 16;
4427 }
4428
4429 if (env->cr[4] & CR4_OSFXSR_MASK) {
4430 /* XXX: finish it */
4431 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4432 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4433 if (env->hflags & HF_CS64_MASK)
4434 nb_xmm_regs = 16;
4435 else
4436 nb_xmm_regs = 8;
4437 addr = ptr + 0xa0;
4438 /* Fast FXSAVE leaves out the XMM registers */
4439 if (!(env->efer & MSR_EFER_FFXSR)
4440 || (env->hflags & HF_CPL_MASK)
4441 || !(env->hflags & HF_LMA_MASK)) {
4442 for(i = 0; i < nb_xmm_regs; i++) {
4443 stq(addr, env->xmm_regs[i].XMM_Q(0));
4444 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4445 addr += 16;
4446 }
4447 }
4448 }
4449 }
4450
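/* Layout of the FXSAVE area written above (added for clarity, derived from
   the code): 0x00 FCW, 0x02 FSW, 0x04 abridged tag word (fptag ^ 0xff, one
   bit per register, 1 = valid), 0x08 instruction pointer (RIP, or EIP+CS),
   0x10 data pointer (RDP, or DP+DS), 0x18 MXCSR, 0x1c MXCSR_MASK,
   0x20-0x9f ST0-ST7/MM0-MM7 in 16-byte slots, 0xa0 onwards the XMM
   registers (16 in 64-bit code, otherwise 8) in 16-byte slots. */
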
4451 void helper_fxrstor(target_ulong ptr, int data64)
4452 {
4453 int i, fpus, fptag, nb_xmm_regs;
4454 CPU86_LDouble tmp;
4455 target_ulong addr;
4456
4457 /* The operand must be 16 byte aligned */
4458 if (ptr & 0xf) {
4459 raise_exception(EXCP0D_GPF);
4460 }
4461
4462 env->fpuc = lduw(ptr);
4463 fpus = lduw(ptr + 2);
4464 fptag = lduw(ptr + 4);
4465 env->fpstt = (fpus >> 11) & 7;
4466 env->fpus = fpus & ~0x3800;
4467 fptag ^= 0xff;
4468 for(i = 0;i < 8; i++) {
4469 env->fptags[i] = ((fptag >> i) & 1);
4470 }
4471
4472 addr = ptr + 0x20;
4473 for(i = 0;i < 8; i++) {
4474 tmp = helper_fldt(addr);
4475 ST(i) = tmp;
4476 addr += 16;
4477 }
4478
4479 if (env->cr[4] & CR4_OSFXSR_MASK) {
4480 /* XXX: finish it */
4481 env->mxcsr = ldl(ptr + 0x18);
4482 //ldl(ptr + 0x1c);
4483 if (env->hflags & HF_CS64_MASK)
4484 nb_xmm_regs = 16;
4485 else
4486 nb_xmm_regs = 8;
4487 addr = ptr + 0xa0;
4488 /* Fast FXRSTOR leaves out the XMM registers */
4489 if (!(env->efer & MSR_EFER_FFXSR)
4490 || (env->hflags & HF_CPL_MASK)
4491 || !(env->hflags & HF_LMA_MASK)) {
4492 for(i = 0; i < nb_xmm_regs; i++) {
4493 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4494 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4495 addr += 16;
4496 }
4497 }
4498 }
4499 }
4500
4501 #ifndef USE_X86LDOUBLE
4502
4503 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4504 {
4505 CPU86_LDoubleU temp;
4506 int e;
4507
4508 temp.d = f;
4509 /* mantissa */
4510 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4511 /* exponent + sign */
4512 e = EXPD(temp) - EXPBIAS + 16383;
4513 e |= SIGND(temp) >> 16;
4514 *pexp = e;
4515 }
4516
4517 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4518 {
4519 CPU86_LDoubleU temp;
4520 int e;
4521 uint64_t ll;
4522
4523 /* XXX: handle overflow ? */
4524 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4525 e |= (upper >> 4) & 0x800; /* sign */
4526 ll = (mant >> 11) & ((1LL << 52) - 1);
4527 #ifdef __arm__
4528 temp.l.upper = (e << 20) | (ll >> 32);
4529 temp.l.lower = ll;
4530 #else
4531 temp.ll = ll | ((uint64_t)e << 52);
4532 #endif
4533 return temp.d;
4534 }
4535
4536 #else
4537
4538 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4539 {
4540 CPU86_LDoubleU temp;
4541
4542 temp.d = f;
4543 *pmant = temp.l.lower;
4544 *pexp = temp.l.upper;
4545 }
4546
4547 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4548 {
4549 CPU86_LDoubleU temp;
4550
4551 temp.l.upper = upper;
4552 temp.l.lower = mant;
4553 return temp.d;
4554 }
4555 #endif
4556
4557 #ifdef TARGET_X86_64
4558
4559 //#define DEBUG_MULDIV
4560
4561 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4562 {
4563 *plow += a;
4564 /* carry test */
4565 if (*plow < a)
4566 (*phigh)++;
4567 *phigh += b;
4568 }
4569
4570 static void neg128(uint64_t *plow, uint64_t *phigh)
4571 {
4572 *plow = ~ *plow;
4573 *phigh = ~ *phigh;
4574 add128(plow, phigh, 1, 0);
4575 }
4576
4577 /* return TRUE if overflow */
4578 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4579 {
4580 uint64_t q, r, a1, a0;
4581 int i, qb, ab;
4582
4583 a0 = *plow;
4584 a1 = *phigh;
4585 if (a1 == 0) {
4586 q = a0 / b;
4587 r = a0 % b;
4588 *plow = q;
4589 *phigh = r;
4590 } else {
4591 if (a1 >= b)
4592 return 1;
4593 /* XXX: use a better algorithm */
4594 for(i = 0; i < 64; i++) {
4595 ab = a1 >> 63;
4596 a1 = (a1 << 1) | (a0 >> 63);
4597 if (ab || a1 >= b) {
4598 a1 -= b;
4599 qb = 1;
4600 } else {
4601 qb = 0;
4602 }
4603 a0 = (a0 << 1) | qb;
4604 }
4605 #if defined(DEBUG_MULDIV)
4606 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4607 *phigh, *plow, b, a0, a1);
4608 #endif
4609 *plow = a0;
4610 *phigh = a1;
4611 }
4612 return 0;
4613 }
4614
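/* Illustrative sketch (not part of the original helpers): driving div64()
   directly. Dividing the 128-bit value 2^64 + 10 (high = 1, low = 10) by 3
   leaves the quotient in *plow and the remainder in *phigh; the function
   returns nonzero only when the quotient does not fit in 64 bits. Values
   are made up for the example. */
#if 0
static void div64_example(void)
{
    uint64_t low = 10, high = 1;            /* dividend = 2^64 + 10 */

    if (!div64(&low, &high, 3)) {
        /* low  == 0x5555555555555558  (quotient)
           high == 2                   (remainder) */
    }
}
#endif
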
4615 /* return TRUE if overflow */
4616 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4617 {
4618 int sa, sb;
4619 sa = ((int64_t)*phigh < 0);
4620 if (sa)
4621 neg128(plow, phigh);
4622 sb = (b < 0);
4623 if (sb)
4624 b = -b;
4625 if (div64(plow, phigh, b) != 0)
4626 return 1;
4627 if (sa ^ sb) {
4628 if (*plow > (1ULL << 63))
4629 return 1;
4630 *plow = - *plow;
4631 } else {
4632 if (*plow >= (1ULL << 63))
4633 return 1;
4634 }
4635 if (sa)
4636 *phigh = - *phigh;
4637 return 0;
4638 }
4639
4640 void helper_mulq_EAX_T0(target_ulong t0)
4641 {
4642 uint64_t r0, r1;
4643
4644 mulu64(&r0, &r1, EAX, t0);
4645 EAX = r0;
4646 EDX = r1;
4647 CC_DST = r0;
4648 CC_SRC = r1;
4649 }
4650
4651 void helper_imulq_EAX_T0(target_ulong t0)
4652 {
4653 uint64_t r0, r1;
4654
4655 muls64(&r0, &r1, EAX, t0);
4656 EAX = r0;
4657 EDX = r1;
4658 CC_DST = r0;
4659 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4660 }
4661
4662 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4663 {
4664 uint64_t r0, r1;
4665
4666 muls64(&r0, &r1, t0, t1);
4667 CC_DST = r0;
4668 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4669 return r0;
4670 }
4671
4672 void helper_divq_EAX(target_ulong t0)
4673 {
4674 uint64_t r0, r1;
4675 if (t0 == 0) {
4676 raise_exception(EXCP00_DIVZ);
4677 }
4678 r0 = EAX;
4679 r1 = EDX;
4680 if (div64(&r0, &r1, t0))
4681 raise_exception(EXCP00_DIVZ);
4682 EAX = r0;
4683 EDX = r1;
4684 }
4685
4686 void helper_idivq_EAX(target_ulong t0)
4687 {
4688 uint64_t r0, r1;
4689 if (t0 == 0) {
4690 raise_exception(EXCP00_DIVZ);
4691 }
4692 r0 = EAX;
4693 r1 = EDX;
4694 if (idiv64(&r0, &r1, t0))
4695 raise_exception(EXCP00_DIVZ);
4696 EAX = r0;
4697 EDX = r1;
4698 }
4699 #endif
4700
4701 static void do_hlt(void)
4702 {
4703 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4704 env->halted = 1;
4705 env->exception_index = EXCP_HLT;
4706 cpu_loop_exit();
4707 }
4708
4709 void helper_hlt(int next_eip_addend)
4710 {
4711 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4712 EIP += next_eip_addend;
4713
4714 do_hlt();
4715 }
4716
4717 void helper_monitor(target_ulong ptr)
4718 {
4719 if ((uint32_t)ECX != 0)
4720 raise_exception(EXCP0D_GPF);
4721 /* XXX: store address ? */
4722 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4723 }
4724
4725 void helper_mwait(int next_eip_addend)
4726 {
4727 if ((uint32_t)ECX != 0)
4728 raise_exception(EXCP0D_GPF);
4729 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4730 EIP += next_eip_addend;
4731
4732 /* XXX: not complete but not completely erroneous */
4733 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4734 /* more than one CPU: do not sleep because another CPU may
4735 wake this one */
4736 } else {
4737 do_hlt();
4738 }
4739 }
4740
4741 void helper_debug(void)
4742 {
4743 env->exception_index = EXCP_DEBUG;
4744 cpu_loop_exit();
4745 }
4746
4747 void helper_reset_rf(void)
4748 {
4749 env->eflags &= ~RF_MASK;
4750 }
4751
4752 void helper_raise_interrupt(int intno, int next_eip_addend)
4753 {
4754 raise_interrupt(intno, 1, 0, next_eip_addend);
4755 }
4756
4757 void helper_raise_exception(int exception_index)
4758 {
4759 raise_exception(exception_index);
4760 }
4761
4762 void helper_cli(void)
4763 {
4764 env->eflags &= ~IF_MASK;
4765 }
4766
4767 void helper_sti(void)
4768 {
4769 env->eflags |= IF_MASK;
4770 }
4771
4772 #if 0
4773 /* vm86plus instructions */
4774 void helper_cli_vm(void)
4775 {
4776 env->eflags &= ~VIF_MASK;
4777 }
4778
4779 void helper_sti_vm(void)
4780 {
4781 env->eflags |= VIF_MASK;
4782 if (env->eflags & VIP_MASK) {
4783 raise_exception(EXCP0D_GPF);
4784 }
4785 }
4786 #endif
4787
4788 void helper_set_inhibit_irq(void)
4789 {
4790 env->hflags |= HF_INHIBIT_IRQ_MASK;
4791 }
4792
4793 void helper_reset_inhibit_irq(void)
4794 {
4795 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4796 }
4797
4798 void helper_boundw(target_ulong a0, int v)
4799 {
4800 int low, high;
4801 low = ldsw(a0);
4802 high = ldsw(a0 + 2);
4803 v = (int16_t)v;
4804 if (v < low || v > high) {
4805 raise_exception(EXCP05_BOUND);
4806 }
4807 }
4808
4809 void helper_boundl(target_ulong a0, int v)
4810 {
4811 int low, high;
4812 low = ldl(a0);
4813 high = ldl(a0 + 4);
4814 if (v < low || v > high) {
4815 raise_exception(EXCP05_BOUND);
4816 }
4817 }
4818
4819 #if !defined(CONFIG_USER_ONLY)
4820
4821 #define MMUSUFFIX _mmu
4822
4823 #define SHIFT 0
4824 #include "softmmu_template.h"
4825
4826 #define SHIFT 1
4827 #include "softmmu_template.h"
4828
4829 #define SHIFT 2
4830 #include "softmmu_template.h"
4831
4832 #define SHIFT 3
4833 #include "softmmu_template.h"
4834
4835 #endif
4836
4837 #if !defined(CONFIG_USER_ONLY)
4838 /* try to fill the TLB and return an exception if error. If retaddr is
4839 NULL, it means that the function was called in C code (i.e. not
4840 from generated code or from helper.c) */
4841 /* XXX: fix it to restore all registers */
4842 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4843 {
4844 TranslationBlock *tb;
4845 int ret;
4846 unsigned long pc;
4847 CPUX86State *saved_env;
4848
4849 /* XXX: hack to restore env in all cases, even if not called from
4850 generated code */
4851 saved_env = env;
4852 env = cpu_single_env;
4853
4854 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4855 if (ret) {
4856 if (retaddr) {
4857 /* now we have a real cpu fault */
4858 pc = (unsigned long)retaddr;
4859 tb = tb_find_pc(pc);
4860 if (tb) {
4861 /* the PC is inside the translated code. It means that we have
4862 a virtual CPU fault */
4863 cpu_restore_state(tb, env, pc);
4864 }
4865 }
4866 raise_exception_err(env->exception_index, env->error_code);
4867 }
4868 env = saved_env;
4869 }
4870 #endif
4871
4872 /* Secure Virtual Machine helpers */
4873
4874 #if defined(CONFIG_USER_ONLY)
4875
4876 void helper_vmrun(int aflag, int next_eip_addend)
4877 {
4878 }
4879 void helper_vmmcall(void)
4880 {
4881 }
4882 void helper_vmload(int aflag)
4883 {
4884 }
4885 void helper_vmsave(int aflag)
4886 {
4887 }
4888 void helper_stgi(void)
4889 {
4890 }
4891 void helper_clgi(void)
4892 {
4893 }
4894 void helper_skinit(void)
4895 {
4896 }
4897 void helper_invlpga(int aflag)
4898 {
4899 }
4900 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4901 {
4902 }
4903 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4904 {
4905 }
4906
4907 void helper_svm_check_io(uint32_t port, uint32_t param,
4908 uint32_t next_eip_addend)
4909 {
4910 }
4911 #else
4912
4913 static inline void svm_save_seg(target_phys_addr_t addr,
4914 const SegmentCache *sc)
4915 {
4916 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4917 sc->selector);
4918 stq_phys(addr + offsetof(struct vmcb_seg, base),
4919 sc->base);
4920 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4921 sc->limit);
4922 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4923 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4924 }
4925
4926 static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4927 {
4928 unsigned int flags;
4929
4930 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4931 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4932 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4933 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4934 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4935 }
4936
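/* Worked example (added for clarity) of the attrib packing used by
   svm_save_seg()/svm_load_seg() above: the VMCB keeps the access byte in
   attrib bits 0-7 and the AVL/L/D/G nibble (descriptor bits 20-23) in
   attrib bits 8-11. A typical flat 32-bit code segment with cached flags
   0x00c09b00 is saved as attrib 0xc9b and loaded back to 0x00c09b00. */
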
4937 static inline void svm_load_seg_cache(target_phys_addr_t addr,
4938 CPUState *env, int seg_reg)
4939 {
4940 SegmentCache sc1, *sc = &sc1;
4941 svm_load_seg(addr, sc);
4942 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4943 sc->base, sc->limit, sc->flags);
4944 }
4945
4946 void helper_vmrun(int aflag, int next_eip_addend)
4947 {
4948 target_ulong addr;
4949 uint32_t event_inj;
4950 uint32_t int_ctl;
4951
4952 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4953
4954 if (aflag == 2)
4955 addr = EAX;
4956 else
4957 addr = (uint32_t)EAX;
4958
4959 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4960
4961 env->vm_vmcb = addr;
4962
4963 /* save the current CPU state in the hsave page */
4964 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4965 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4966
4967 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4968 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4969
4970 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4971 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4972 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4973 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4974 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4975 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4976
4977 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4978 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4979
4980 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4981 &env->segs[R_ES]);
4982 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4983 &env->segs[R_CS]);
4984 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4985 &env->segs[R_SS]);
4986 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4987 &env->segs[R_DS]);
4988
4989 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4990 EIP + next_eip_addend);
4991 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4992 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4993
4994 /* load the interception bitmaps so we do not need to access the
4995 vmcb in svm mode */
4996 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4997 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4998 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4999 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
5000 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
5001 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
5002
5003 /* enable intercepts */
5004 env->hflags |= HF_SVMI_MASK;
5005
5006 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
5007
5008 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
5009 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
5010
5011 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5012 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5013
5014 /* clear exit_info_2 so we behave like the real hardware */
5015 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5016
5017 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5018 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5019 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5020 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5021 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5022 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5023 if (int_ctl & V_INTR_MASKING_MASK) {
5024 env->v_tpr = int_ctl & V_TPR_MASK;
5025 env->hflags2 |= HF2_VINTR_MASK;
5026 if (env->eflags & IF_MASK)
5027 env->hflags2 |= HF2_HIF_MASK;
5028 }
5029
5030 cpu_load_efer(env,
5031 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5032 env->eflags = 0;
5033 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5034 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5035 CC_OP = CC_OP_EFLAGS;
5036
5037 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5038 env, R_ES);
5039 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5040 env, R_CS);
5041 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5042 env, R_SS);
5043 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5044 env, R_DS);
5045
5046 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5047 env->eip = EIP;
5048 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5049 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5050 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5051 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5052 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5053
5054 /* FIXME: guest state consistency checks */
5055
5056 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5057 case TLB_CONTROL_DO_NOTHING:
5058 break;
5059 case TLB_CONTROL_FLUSH_ALL_ASID:
5060 /* FIXME: this is not 100% correct but should work for now */
5061 tlb_flush(env, 1);
5062 break;
5063 }
5064
5065 env->hflags2 |= HF2_GIF_MASK;
5066
5067 if (int_ctl & V_IRQ_MASK) {
5068 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5069 }
5070
5071 /* maybe we need to inject an event */
5072 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5073 if (event_inj & SVM_EVTINJ_VALID) {
5074 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5075 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5076 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5077
5078 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5079 /* FIXME: need to implement valid_err */
5080 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5081 case SVM_EVTINJ_TYPE_INTR:
5082 env->exception_index = vector;
5083 env->error_code = event_inj_err;
5084 env->exception_is_int = 0;
5085 env->exception_next_eip = -1;
5086 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5087 /* XXX: is it always correct ? */
5088 do_interrupt(vector, 0, 0, 0, 1);
5089 break;
5090 case SVM_EVTINJ_TYPE_NMI:
5091 env->exception_index = EXCP02_NMI;
5092 env->error_code = event_inj_err;
5093 env->exception_is_int = 0;
5094 env->exception_next_eip = EIP;
5095 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5096 cpu_loop_exit();
5097 break;
5098 case SVM_EVTINJ_TYPE_EXEPT:
5099 env->exception_index = vector;
5100 env->error_code = event_inj_err;
5101 env->exception_is_int = 0;
5102 env->exception_next_eip = -1;
5103 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5104 cpu_loop_exit();
5105 break;
5106 case SVM_EVTINJ_TYPE_SOFT:
5107 env->exception_index = vector;
5108 env->error_code = event_inj_err;
5109 env->exception_is_int = 1;
5110 env->exception_next_eip = EIP;
5111 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5112 cpu_loop_exit();
5113 break;
5114 }
5115 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5116 }
5117 }
5118
5119 void helper_vmmcall(void)
5120 {
5121 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5122 raise_exception(EXCP06_ILLOP);
5123 }
5124
5125 void helper_vmload(int aflag)
5126 {
5127 target_ulong addr;
5128 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5129
5130 if (aflag == 2)
5131 addr = EAX;
5132 else
5133 addr = (uint32_t)EAX;
5134
5135 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5136 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5137 env->segs[R_FS].base);
5138
5139 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5140 env, R_FS);
5141 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5142 env, R_GS);
5143 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5144 &env->tr);
5145 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5146 &env->ldt);
5147
5148 #ifdef TARGET_X86_64
5149 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5150 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5151 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5152 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5153 #endif
5154 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5155 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5156 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5157 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5158 }
5159
5160 void helper_vmsave(int aflag)
5161 {
5162 target_ulong addr;
5163 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5164
5165 if (aflag == 2)
5166 addr = EAX;
5167 else
5168 addr = (uint32_t)EAX;
5169
5170 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5171 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5172 env->segs[R_FS].base);
5173
5174 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5175 &env->segs[R_FS]);
5176 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5177 &env->segs[R_GS]);
5178 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5179 &env->tr);
5180 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5181 &env->ldt);
5182
5183 #ifdef TARGET_X86_64
5184 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5185 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5186 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5187 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5188 #endif
5189 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5190 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5191 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5192 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5193 }
5194
5195 void helper_stgi(void)
5196 {
5197 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5198 env->hflags2 |= HF2_GIF_MASK;
5199 }
5200
5201 void helper_clgi(void)
5202 {
5203 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5204 env->hflags2 &= ~HF2_GIF_MASK;
5205 }
5206
5207 void helper_skinit(void)
5208 {
5209 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5210 /* XXX: not implemented */
5211 raise_exception(EXCP06_ILLOP);
5212 }
5213
5214 void helper_invlpga(int aflag)
5215 {
5216 target_ulong addr;
5217 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5218
5219 if (aflag == 2)
5220 addr = EAX;
5221 else
5222 addr = (uint32_t)EAX;
5223
5224 /* XXX: could use the ASID to see whether the flush is
5225 actually needed */
5226 tlb_flush_page(env, addr);
5227 }
5228
5229 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5230 {
5231 if (likely(!(env->hflags & HF_SVMI_MASK)))
5232 return;
5233 switch(type) {
5234 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5235 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5236 helper_vmexit(type, param);
5237 }
5238 break;
5239 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5240 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5241 helper_vmexit(type, param);
5242 }
5243 break;
5244 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5245 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5246 helper_vmexit(type, param);
5247 }
5248 break;
5249 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5250 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5251 helper_vmexit(type, param);
5252 }
5253 break;
5254 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5255 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5256 helper_vmexit(type, param);
5257 }
5258 break;
5259 case SVM_EXIT_MSR:
5260 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5261 /* FIXME: this should be read in at vmrun (faster this way?) */
5262 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5263 uint32_t t0, t1;
5264 switch((uint32_t)ECX) {
5265 case 0 ... 0x1fff:
5266 t0 = (ECX * 2) % 8;
5267 t1 = (ECX * 2) / 8;
5268 break;
5269 case 0xc0000000 ... 0xc0001fff:
5270 t0 = (8192 + ECX - 0xc0000000) * 2;
5271 t1 = (t0 / 8);
5272 t0 %= 8;
5273 break;
5274 case 0xc0010000 ... 0xc0011fff:
5275 t0 = (16384 + ECX - 0xc0010000) * 2;
5276 t1 = (t0 / 8);
5277 t0 %= 8;
5278 break;
5279 default:
5280 helper_vmexit(type, param);
5281 t0 = 0;
5282 t1 = 0;
5283 break;
5284 }
5285 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5286 helper_vmexit(type, param);
5287 }
5288 break;
5289 default:
5290 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5291 helper_vmexit(type, param);
5292 }
5293 break;
5294 }
5295 }
5296
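/* Worked example (added for clarity) of the SVM_EXIT_MSR lookup above:
   the MSR permission map holds two bits per MSR, read bit first, and param
   selects the bit via (1 << param). For ECX = 0xc0000080 (EFER) the second
   range applies, so t0 = (8192 + 0x80) * 2 = 16640, giving byte offset
   t1 = 2080 and bit offset t0 = 0; a read intercept is bit 0 and a write
   intercept bit 1 of that byte. */
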
5297 void helper_svm_check_io(uint32_t port, uint32_t param,
5298 uint32_t next_eip_addend)
5299 {
5300 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5301 /* FIXME: this should be read in at vmrun (faster this way?) */
5302 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5303 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5304 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5305 /* next EIP */
5306 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5307 env->eip + next_eip_addend);
5308 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5309 }
5310 }
5311 }
5312
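/* Worked example (added for clarity, assuming bits 4-6 of param carry the
   access size in bytes, as the mask computation above implies): the IOPM
   holds one bit per port, so port / 8 selects the byte and port & 7 the
   starting bit, with the mask widened to cover each byte of a multi-byte
   access. A one-byte access to port 0x71 therefore tests bit 1 of the
   16-bit word read at iopm_base_pa + 14. */
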
5313 /* Note: currently only 32 bits of exit_code are used */
5314 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5315 {
5316 uint32_t int_ctl;
5317
5318 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5319 exit_code, exit_info_1,
5320 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5321 EIP);
5322
5323 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5324 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5325 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5326 } else {
5327 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5328 }
5329
5330 /* Save the VM state in the vmcb */
5331 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5332 &env->segs[R_ES]);
5333 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5334 &env->segs[R_CS]);
5335 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5336 &env->segs[R_SS]);
5337 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5338 &env->segs[R_DS]);
5339
5340 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5341 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5342
5343 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5344 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5345
5346 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5347 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5348 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5349 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5350 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5351
5352 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5353 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5354 int_ctl |= env->v_tpr & V_TPR_MASK;
5355 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5356 int_ctl |= V_IRQ_MASK;
5357 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5358
5359 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5360 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5361 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5362 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5363 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5364 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5365 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5366
5367 /* Reload the host state from vm_hsave */
5368 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5369 env->hflags &= ~HF_SVMI_MASK;
5370 env->intercept = 0;
5371 env->intercept_exceptions = 0;
5372 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5373 env->tsc_offset = 0;
5374
5375 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5376 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5377
5378 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5379 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5380
5381 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5382 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5383 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5384 /* we need to set the efer after the crs so the hidden flags get
5385 set properly */
5386 cpu_load_efer(env,
5387 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5388 env->eflags = 0;
5389 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5390 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5391 CC_OP = CC_OP_EFLAGS;
5392
5393 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5394 env, R_ES);
5395 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5396 env, R_CS);
5397 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5398 env, R_SS);
5399 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5400 env, R_DS);
5401
5402 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5403 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5404 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5405
5406 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5407 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5408
5409 /* other setups */
5410 cpu_x86_set_cpl(env, 0);
5411 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5412 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5413
5414 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5415 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5416 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5417 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5418 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
5419
5420 env->hflags2 &= ~HF2_GIF_MASK;
5421 /* FIXME: Resets the current ASID register to zero (host ASID). */
5422
5423 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5424
5425 /* Clears the TSC_OFFSET inside the processor. */
5426
5427 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5428 from the page table indicated by the host's CR3. If the PDPEs contain
5429 illegal state, the processor causes a shutdown. */
5430
5431 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5432 env->cr[0] |= CR0_PE_MASK;
5433 env->eflags &= ~VM_MASK;
5434
5435 /* Disables all breakpoints in the host DR7 register. */
5436
5437 /* Checks the reloaded host state for consistency. */
5438
5439 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5440 host's code segment or non-canonical (in the case of long mode), a
5441 #GP fault is delivered inside the host. */
5442
5443 /* remove any pending exception */
5444 env->exception_index = -1;
5445 env->error_code = 0;
5446 env->old_exception = -1;
5447
5448 cpu_loop_exit();
5449 }
5450
5451 #endif
5452
5453 /* MMX/SSE */
5454 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5455 void helper_enter_mmx(void)
5456 {
5457 env->fpstt = 0;
5458 *(uint32_t *)(env->fptags) = 0;
5459 *(uint32_t *)(env->fptags + 4) = 0;
5460 }
5461
5462 void helper_emms(void)
5463 {
5464 /* set to empty state */
5465 *(uint32_t *)(env->fptags) = 0x01010101;
5466 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5467 }
5468
5469 /* XXX: suppress */
5470 void helper_movq(void *d, void *s)
5471 {
5472 *(uint64_t *)d = *(uint64_t *)s;
5473 }
5474
5475 #define SHIFT 0
5476 #include "ops_sse.h"
5477
5478 #define SHIFT 1
5479 #include "ops_sse.h"
5480
5481 #define SHIFT 0
5482 #include "helper_template.h"
5483 #undef SHIFT
5484
5485 #define SHIFT 1
5486 #include "helper_template.h"
5487 #undef SHIFT
5488
5489 #define SHIFT 2
5490 #include "helper_template.h"
5491 #undef SHIFT
5492
5493 #ifdef TARGET_X86_64
5494
5495 #define SHIFT 3
5496 #include "helper_template.h"
5497 #undef SHIFT
5498
5499 #endif
5500
5501 /* bit operations */
5502 target_ulong helper_bsf(target_ulong t0)
5503 {
5504 int count;
5505 target_ulong res;
5506
5507 res = t0;
5508 count = 0;
5509 while ((res & 1) == 0) {
5510 count++;
5511 res >>= 1;
5512 }
5513 return count;
5514 }
5515
5516 target_ulong helper_lzcnt(target_ulong t0, int wordsize)
5517 {
5518 int count;
5519 target_ulong res, mask;
5520
5521 if (wordsize > 0 && t0 == 0) {
5522 return wordsize;
5523 }
5524 res = t0;
5525 count = TARGET_LONG_BITS - 1;
5526 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5527 while ((res & mask) == 0) {
5528 count--;
5529 res <<= 1;
5530 }
5531 if (wordsize > 0) {
5532 return wordsize - 1 - count;
5533 }
5534 return count;
5535 }
5536
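/* Worked examples (added for clarity): with wordsize = 16,
   helper_lzcnt(0x0010, 16) returns 11 (the number of leading zeros in the
   16-bit value) and helper_lzcnt(0, 16) returns 16; with wordsize = 0 the
   same loop yields the index of the most significant set bit, so
   helper_bsr(0x0010) below returns 4. */
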
5537 target_ulong helper_bsr(target_ulong t0)
5538 {
5539 return helper_lzcnt(t0, 0);
5540 }
5541
5542 static int compute_all_eflags(void)
5543 {
5544 return CC_SRC;
5545 }
5546
5547 static int compute_c_eflags(void)
5548 {
5549 return CC_SRC & CC_C;
5550 }
5551
5552 uint32_t helper_cc_compute_all(int op)
5553 {
5554 switch (op) {
5555 default: /* should never happen */ return 0;
5556
5557 case CC_OP_EFLAGS: return compute_all_eflags();
5558
5559 case CC_OP_MULB: return compute_all_mulb();
5560 case CC_OP_MULW: return compute_all_mulw();
5561 case CC_OP_MULL: return compute_all_mull();
5562
5563 case CC_OP_ADDB: return compute_all_addb();
5564 case CC_OP_ADDW: return compute_all_addw();
5565 case CC_OP_ADDL: return compute_all_addl();
5566
5567 case CC_OP_ADCB: return compute_all_adcb();
5568 case CC_OP_ADCW: return compute_all_adcw();
5569 case CC_OP_ADCL: return compute_all_adcl();
5570
5571 case CC_OP_SUBB: return compute_all_subb();
5572 case CC_OP_SUBW: return compute_all_subw();
5573 case CC_OP_SUBL: return compute_all_subl();
5574
5575 case CC_OP_SBBB: return compute_all_sbbb();
5576 case CC_OP_SBBW: return compute_all_sbbw();
5577 case CC_OP_SBBL: return compute_all_sbbl();
5578
5579 case CC_OP_LOGICB: return compute_all_logicb();
5580 case CC_OP_LOGICW: return compute_all_logicw();
5581 case CC_OP_LOGICL: return compute_all_logicl();
5582
5583 case CC_OP_INCB: return compute_all_incb();
5584 case CC_OP_INCW: return compute_all_incw();
5585 case CC_OP_INCL: return compute_all_incl();
5586
5587 case CC_OP_DECB: return compute_all_decb();
5588 case CC_OP_DECW: return compute_all_decw();
5589 case CC_OP_DECL: return compute_all_decl();
5590
5591 case CC_OP_SHLB: return compute_all_shlb();
5592 case CC_OP_SHLW: return compute_all_shlw();
5593 case CC_OP_SHLL: return compute_all_shll();
5594
5595 case CC_OP_SARB: return compute_all_sarb();
5596 case CC_OP_SARW: return compute_all_sarw();
5597 case CC_OP_SARL: return compute_all_sarl();
5598
5599 #ifdef TARGET_X86_64
5600 case CC_OP_MULQ: return compute_all_mulq();
5601
5602 case CC_OP_ADDQ: return compute_all_addq();
5603
5604 case CC_OP_ADCQ: return compute_all_adcq();
5605
5606 case CC_OP_SUBQ: return compute_all_subq();
5607
5608 case CC_OP_SBBQ: return compute_all_sbbq();
5609
5610 case CC_OP_LOGICQ: return compute_all_logicq();
5611
5612 case CC_OP_INCQ: return compute_all_incq();
5613
5614 case CC_OP_DECQ: return compute_all_decq();
5615
5616 case CC_OP_SHLQ: return compute_all_shlq();
5617
5618 case CC_OP_SARQ: return compute_all_sarq();
5619 #endif
5620 }
5621 }
5622
5623 uint32_t helper_cc_compute_c(int op)
5624 {
5625 switch (op) {
5626 default: /* should never happen */ return 0;
5627
5628 case CC_OP_EFLAGS: return compute_c_eflags();
5629
5630 case CC_OP_MULB: return compute_c_mull();
5631 case CC_OP_MULW: return compute_c_mull();
5632 case CC_OP_MULL: return compute_c_mull();
5633
5634 case CC_OP_ADDB: return compute_c_addb();
5635 case CC_OP_ADDW: return compute_c_addw();
5636 case CC_OP_ADDL: return compute_c_addl();
5637
5638 case CC_OP_ADCB: return compute_c_adcb();
5639 case CC_OP_ADCW: return compute_c_adcw();
5640 case CC_OP_ADCL: return compute_c_adcl();
5641
5642 case CC_OP_SUBB: return compute_c_subb();
5643 case CC_OP_SUBW: return compute_c_subw();
5644 case CC_OP_SUBL: return compute_c_subl();
5645
5646 case CC_OP_SBBB: return compute_c_sbbb();
5647 case CC_OP_SBBW: return compute_c_sbbw();
5648 case CC_OP_SBBL: return compute_c_sbbl();
5649
5650 case CC_OP_LOGICB: return compute_c_logicb();
5651 case CC_OP_LOGICW: return compute_c_logicw();
5652 case CC_OP_LOGICL: return compute_c_logicl();
5653
5654 case CC_OP_INCB: return compute_c_incl();
5655 case CC_OP_INCW: return compute_c_incl();
5656 case CC_OP_INCL: return compute_c_incl();
5657
5658 case CC_OP_DECB: return compute_c_incl();
5659 case CC_OP_DECW: return compute_c_incl();
5660 case CC_OP_DECL: return compute_c_incl();
5661
5662 case CC_OP_SHLB: return compute_c_shlb();
5663 case CC_OP_SHLW: return compute_c_shlw();
5664 case CC_OP_SHLL: return compute_c_shll();
5665
5666 case CC_OP_SARB: return compute_c_sarl();
5667 case CC_OP_SARW: return compute_c_sarl();
5668 case CC_OP_SARL: return compute_c_sarl();
5669
5670 #ifdef TARGET_X86_64
5671 case CC_OP_MULQ: return compute_c_mull();
5672
5673 case CC_OP_ADDQ: return compute_c_addq();
5674
5675 case CC_OP_ADCQ: return compute_c_adcq();
5676
5677 case CC_OP_SUBQ: return compute_c_subq();
5678
5679 case CC_OP_SBBQ: return compute_c_sbbq();
5680
5681 case CC_OP_LOGICQ: return compute_c_logicq();
5682
5683 case CC_OP_INCQ: return compute_c_incl();
5684
5685 case CC_OP_DECQ: return compute_c_incl();
5686
5687 case CC_OP_SHLQ: return compute_c_shlq();
5688
5689 case CC_OP_SARQ: return compute_c_sarl();
5690 #endif
5691 }
5692 }