]> git.proxmox.com Git - mirror_qemu.git/blame - target-i386/helper.c
find -type f | xargs sed -i 's/[\t ]*$//g' # Yes, again. Note the star in the regex.
[mirror_qemu.git] / target-i386 / helper.c
CommitLineData
2c0262af
FB
1/*
2 * i386 helpers
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "exec.h"
21
f3f2d9be
FB
22//#define DEBUG_PCALL
23
8145122b
FB
24#if 0
25#define raise_exception_err(a, b)\
26do {\
9540a78b
FB
27 if (logfile)\
28 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
8145122b
FB
29 (raise_exception_err)(a, b);\
30} while (0)
31#endif
32
2c0262af
FB
/* PF lookup table: parity_table[b] is CC_P when byte value b has an
   even number of set bits (x86 parity flag semantics), 0 otherwise. */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
67
/* modulo 17 table */
/* Rotate-through-carry on a 16-bit operand effectively rotates 17 bits
   (operand + CF), so the shift count is reduced modulo 17. */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
75
/* modulo 9 table */
/* Rotate-through-carry on an 8-bit operand rotates 9 bits
   (operand + CF), so the shift count is reduced modulo 9. */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
83
/* Table of x87 floating-point constants: 0, 1, pi, log10(2), ln(2),
   log2(e), log2(10) — presumably indexed by the FLD-constant opcodes;
   verify against the translator. */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
3b46e624 94
2c0262af
FB
/* thread support */

/* Single coarse-grained lock protecting CPU emulation state. */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

/* Acquire the global CPU lock. */
void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

/* Release the global CPU lock taken by cpu_lock(). */
void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
108
7e84c249
FB
109/* return non zero if error */
110static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
111 int selector)
112{
113 SegmentCache *dt;
114 int index;
14ce26e7 115 target_ulong ptr;
7e84c249
FB
116
117 if (selector & 0x4)
118 dt = &env->ldt;
119 else
120 dt = &env->gdt;
121 index = selector & ~7;
122 if ((index + 7) > dt->limit)
123 return -1;
124 ptr = dt->base + index;
125 *e1_ptr = ldl_kernel(ptr);
126 *e2_ptr = ldl_kernel(ptr + 4);
127 return 0;
128}
3b46e624 129
7e84c249
FB
130static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
131{
132 unsigned int limit;
133 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
134 if (e2 & DESC_G_MASK)
135 limit = (limit << 12) | 0xfff;
136 return limit;
137}
138
/* Reassemble the 32-bit segment base scattered across the descriptor:
   base[15:0] = e1[31:16], base[23:16] = e2[7:0], base[31:24] = e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;
    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
143
/* Fill a segment cache directly from raw descriptor words e1/e2,
   with no permission or presence checks (caller has validated). */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2; /* raw high descriptor word doubles as the flags */
}
150
/* init the segment cache in vm86 mode. */
/* In vm86 the base is simply selector << 4, the limit is 64K and no
   descriptor flags apply. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
158
/* Fetch the ring-'dpl' stack pointer (SS:ESP) from the current TSS.
   Supports both 16-bit (type 1) and 32-bit (type 9) TSS layouts via
   'shift'; aborts on an invalid TR, raises #TS if the entry lies
   beyond the TSS limit. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* bit 3 of the type distinguishes 16-bit (0) from 32-bit (1) TSS */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        /* 16-bit TSS: SP then SS as consecutive words */
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        /* 32-bit TSS: ESP dword, then SS word */
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
193
7e84c249
FB
/* XXX: merge with load_seg() */
/* Load segment register 'seg_reg' with 'selector' as part of a task
   switch, performing the per-register permission checks. All failures
   raise #TS (or #NP for a non-present segment) with the selector as
   the error code. A null selector is allowed except for CS and SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        /* must be a code/data (non-system) descriptor */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* null selector: only legal for data segment registers */
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
243
/* 'source' values for switch_tss(): what triggered the task switch.
   They control busy-bit handling and the NT flag / back link. */
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
247
/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS described by
   tss_selector/e1/e2. 'source' is one of SWITCH_TSS_{JMP,IRET,CALL}
   and 'next_eip' is the return EIP saved into the outgoing TSS.
   Handles task gates (type 5) by chasing the gate to the real TSS,
   saves the current register/segment state into the old TSS, then
   loads registers, segments, LDT and CR3 from the new one. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        /* the TSS referenced by a task gate must live in the GDT */
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* minimum TSS size: 104 bytes for 32-bit, 44 for 16-bit */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* nested task: store back link and set NT in the new EFLAGS */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor of LDT type (2) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
7e84c249
FB
484
/* check if Port I/O is allowed in TSS */
/* Consult the I/O permission bitmap of the current (32-bit) TSS for a
   'size'-byte access at port 'addr'; raises #GP(0) if TR is not a valid
   present 32-bit TSS, the bitmap is out of bounds, or any covered
   permission bit is set. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
509
/* Translator-callable I/O permission checks: byte/word/long accesses
   with the port number taken from T0 or from the low 16 bits of EDX. */
void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
539
891b38e4
FB
540static inline unsigned int get_sp_mask(unsigned int e2)
541{
542 if (e2 & DESC_B_MASK)
543 return 0xffffffff;
544 else
545 return 0xffff;
546}
547
8d7b0fbb
FB
#ifdef TARGET_X86_64
/* Write 'val' into ESP according to the stack-size mask: a 16-bit
   stack updates only SP, a 32-bit stack zero-extends into the 64-bit
   register, and any other mask stores the full value. */
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
561
891b38e4
FB
/* XXX: add a is_user flag to have proper security support */
/* Push/pop 16- and 32-bit values on the guest stack at base 'ssp'.
   'sp' is a local cursor updated in place; each access is masked with
   sp_mask so 16-bit stacks wrap at 64K. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
586
2c0262af
FB
/* protected mode interrupt */
/* Deliver exception/interrupt 'intno' in protected mode. 'is_int' marks
   a software INT (next_eip is then the return address), 'is_hw' a
   hardware interrupt; 'error_code' is pushed for the faults listed
   below. Handles task gates (via switch_tss), 286/386 interrupt and
   trap gates, and inner-privilege stack switches including the vm86
   segment pushes. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        /* faults that push an error code: #DF #TS #NP #SS #GP #PF #AC */
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* gate type bit 3 selects 32-bit (1) vs 16-bit (0) pushes */
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* coming out of vm86: data segments are cleared */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
791
14ce26e7
FB
792#ifdef TARGET_X86_64
793
/* 64-bit stack push/pop: flat address space, no segment base or mask;
   'sp' is a local cursor updated in place. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
805
/* Fetch an RSP entry from the 64-bit TSS: levels 0-2 are the RSPn
   stack pointers, callers pass ist+3 for IST entries. Aborts on an
   invalid TR; raises #TS if the 8-byte entry is beyond the limit. */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
822
/* 64 bit interrupt */
/* Deliver exception/interrupt 'intno' in long mode: 16-byte IDT
   entries, only 386-style interrupt/trap gates are legal, SS:RSP /
   EFLAGS / CS:RIP (+ error code) are always pushed as quadwords, and
   the IST field may force a stack switch even at the same privilege. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        /* faults that push an error code: #DF #TS #NP #SS #GP #PF #AC */
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    /* 64-bit gate target: third descriptor word holds the high bits */
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    /* target must be a 64-bit code segment (L set, D clear) */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        /* long mode allows a null SS at CPL != 3; keep the RPL */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
f419b321 950#endif
14ce26e7 951
/* Emulate the SYSCALL instruction. 'next_eip_addend' is the length of
   the instruction, added to EIP to form the return address saved in
   ECX (and, in long mode, RFLAGS saved in R11). Raises #UD if
   EFER.SCE is clear. CS/SS are loaded from fixed descriptors derived
   from MSR_STAR; long mode jumps to LSTAR or CSTAR depending on the
   current code size, legacy mode to the low 32 bits of STAR. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* MSR_FMASK selects which RFLAGS bits are cleared on entry */
        env->eflags &= ~env->fmask;
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1005
/* SYSRET instruction helper.  dflag == 2 selects a 64-bit return
   (REX.W form).  Raises #UD if EFER.SCE is clear, #GP if not in
   protected mode or not at CPL 0.  The return CS selector comes from
   STAR[63:48]; SS is loaded from selector + 8 at RPL 3. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* 64-bit return: CS selector is STAR[63:48] + 16 (per AMD64) */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* 32-bit compatibility-mode return */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* restore the flags saved in R11 by SYSCALL */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        /* legacy mode return */
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
14ce26e7 1070
2c0262af
FB
/* real mode interrupt */
/* Dispatch interrupt 'intno' using the real-mode IVT (4-byte entries).
   Pushes FLAGS, CS and the return IP (next_eip for software ints,
   current EIP otherwise) as 16-bit values on the stack. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    /* each IVT entry is 4 bytes: IP then CS */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    /* real-mode segment base is selector * 16 */
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1107
/* fake user mode interrupt */
/* Used for user-space-only emulation: performs the privilege check a
   software INT would trigger, then leaves the actual delivery to the
   host-side signal/exception machinery. */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
1133
1134/*
e19e89a5 1135 * Begin execution of an interruption. is_int is TRUE if coming from
2c0262af 1136 * the int instruction. next_eip is the EIP value AFTER the interrupt
3b46e624 1137 * instruction. It is only relevant if is_int is TRUE.
2c0262af 1138 */
5fafdf24 1139void do_interrupt(int intno, int is_int, int error_code,
14ce26e7 1140 target_ulong next_eip, int is_hw)
2c0262af 1141{
1247c5f7 1142 if (loglevel & CPU_LOG_INT) {
e19e89a5
FB
1143 if ((env->cr[0] & CR0_PE_MASK)) {
1144 static int count;
14ce26e7 1145 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
dc6f57fd
FB
1146 count, intno, error_code, is_int,
1147 env->hflags & HF_CPL_MASK,
1148 env->segs[R_CS].selector, EIP,
2ee73ac3 1149 (int)env->segs[R_CS].base + EIP,
8145122b
FB
1150 env->segs[R_SS].selector, ESP);
1151 if (intno == 0x0e) {
14ce26e7 1152 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
8145122b 1153 } else {
14ce26e7 1154 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
8145122b 1155 }
e19e89a5 1156 fprintf(logfile, "\n");
06c2f506 1157 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1247c5f7 1158#if 0
e19e89a5
FB
1159 {
1160 int i;
1161 uint8_t *ptr;
1162 fprintf(logfile, " code=");
1163 ptr = env->segs[R_CS].base + env->eip;
1164 for(i = 0; i < 16; i++) {
1165 fprintf(logfile, " %02x", ldub(ptr + i));
dc6f57fd 1166 }
e19e89a5 1167 fprintf(logfile, "\n");
dc6f57fd 1168 }
8e682019 1169#endif
e19e89a5 1170 count++;
4136f33c 1171 }
4136f33c 1172 }
2c0262af 1173 if (env->cr[0] & CR0_PE_MASK) {
14ce26e7
FB
1174#if TARGET_X86_64
1175 if (env->hflags & HF_LMA_MASK) {
1176 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1177 } else
1178#endif
1179 {
1180 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1181 }
2c0262af
FB
1182 } else {
1183 do_interrupt_real(intno, is_int, error_code, next_eip);
1184 }
1185}
1186
678dde13
TS
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 *
 * "Contributory" exceptions are #DE (0) and vectors 10-13; a
 * contributory exception raised while handling another contributory
 * one, or any fault during page-fault handling, escalates to #DF.
 * A fault while handling #DF aborts with a triple fault.
 */
int check_exception(int intno, int *error_code)
{
    char first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    char second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: %x new %x\n",
                env->old_exception, intno);

    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        /* escalate to double fault; #DF always pushes error code 0 */
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    /* remember exceptions that can participate in future escalation */
    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
1220
2c0262af
FB
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 *
 * Records the pending exception in 'env' and longjmps back to the
 * CPU loop via cpu_loop_exit(); it does not return.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    /* exceptions (not software ints) may escalate to #DF/#TF */
    if (!is_int)
        intno = check_exception(intno, &error_code);

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
1239
0d1a29f9
FB
/* same as raise_exception_err, but do not restore global registers */
/* Longjmps straight back to the CPU loop (env->jmp_env) instead of
   going through cpu_loop_exit(); does not return. */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}
1251
2c0262af 1252/* shortcuts to generate exceptions */
8145122b
FB
1253
/* Raise an exception that carries an error code.  The parenthesized
   function name suppresses expansion of the debugging macro of the
   same name defined at the top of this file. */
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1258
/* Raise an exception with no error code; does not return. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1263
3b21e03e
FB
1264/* SMM support */
1265
5fafdf24 1266#if defined(CONFIG_USER_ONLY)
74ce674f
FB
1267
/* No system management mode in user-only emulation: empty stub. */
void do_smm_enter(void)
{
}
1271
/* RSM is a no-op in user-only emulation: empty stub. */
void helper_rsm(void)
{
}
1275
1276#else
1277
3b21e03e
FB
1278#ifdef TARGET_X86_64
1279#define SMM_REVISION_ID 0x00020064
1280#else
1281#define SMM_REVISION_ID 0x00020000
1282#endif
1283
/* Enter System Management Mode: save the CPU state to the SMRAM
   state-save area at smbase + 0x8000 (64-bit or legacy 32-bit layout,
   selected at build time), then reset the CPU into the flat
   real-mode-like SMM environment with EIP = 0x8000.
   NOTE(review): the 0x7xxx offsets follow the SMM state-save map of
   the chosen SMM_REVISION_ID — verify against the vendor manual
   before changing any of them. */
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit state-save layout: 16 bytes per segment register */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* general-purpose registers, RAX..RDI then R8..R15 */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* legacy 32-bit state-save layout */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    /* segment descriptors: ES/CS/SS in one bank, DS/FS/GS in another */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    /* CS base is smbase, selector is smbase >> 4, flat 4 GB segments */
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1419
/* RSM instruction helper: restore the CPU state saved in SMRAM by
   do_smm_enter() and leave System Management Mode.  The smbase is
   only reloaded when the state-save revision advertises relocation
   support (bit 17 of the revision ID). */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation supported: reload it (2 KB aligned) */
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1543
74ce674f
FB
1544#endif /* !CONFIG_USER_ONLY */
1545
1546
2c0262af
FB
1547#ifdef BUGGY_GCC_DIV64
1548/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1549 call it from another function */
/* Unsigned 64/32 division helper for BUGGY_GCC_DIV64 builds: stores
   the quotient through q_ptr and returns the remainder. */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    uint64_t quotient = num / den;
    uint32_t remainder = num % den;

    *q_ptr = quotient;
    return remainder;
}
1555
/* Signed 64/32 division helper for BUGGY_GCC_DIV64 builds: stores
   the (truncated-toward-zero) quotient through q_ptr and returns
   the remainder. */
int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    int64_t remainder = num % den;

    *q_ptr = num / den;
    return remainder;
}
1561#endif
1562
/* 32-bit DIV helper: divide the 64-bit value EDX:EAX by the 32-bit
   divisor in T0.  Quotient goes to EAX, remainder to EDX.  Raises
   #DE on division by zero or when the quotient does not fit in
   32 bits, as the hardware does. */
void helper_divl_EAX_T0(void)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    /* quotient overflow also raises #DE (divide error) */
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
1584
/* 32-bit IDIV helper: signed division of EDX:EAX by the divisor in
   T0.  Quotient to EAX, remainder to EDX; #DE on division by zero
   or signed quotient overflow. */
void helper_idivl_EAX_T0(void)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = T0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv32(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    /* quotient must fit in a signed 32-bit value, else #DE */
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
1606
1607void helper_cmpxchg8b(void)
1608{
1609 uint64_t d;
1610 int eflags;
1611
1612 eflags = cc_table[CC_OP].compute_all();
14ce26e7 1613 d = ldq(A0);
2c0262af 1614 if (d == (((uint64_t)EDX << 32) | EAX)) {
14ce26e7 1615 stq(A0, ((uint64_t)ECX << 32) | EBX);
2c0262af
FB
1616 eflags |= CC_Z;
1617 } else {
1618 EDX = d >> 32;
1619 EAX = d;
1620 eflags &= ~CC_Z;
1621 }
1622 CC_SRC = eflags;
1623}
1624
88fe8a41
TS
1625void helper_single_step()
1626{
1627 env->dr[6] |= 0x4000;
1628 raise_exception(EXCP01_SSTP);
1629}
1630
2c0262af
FB
/* CPUID helper: fill EAX/EBX/ECX/EDX for the leaf selected by EAX,
   using the feature words stored in the CPU state. */
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    /* NOTE(review): an out-of-range extended leaf (0x8000xxxx) is
       clamped to cpuid_level rather than cpuid_xlevel — looks
       intentional-by-copy but verify against real hardware behavior */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and maximum basic leaf */
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        /* version, APIC id and feature flags */
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        /* maximum extended leaf, vendor string repeated */
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* NOTE(review): EAX here returns cpuid_features; the AMD spec
           puts the extended processor signature (cpuid_version format)
           in EAX for this leaf — confirm before relying on it */
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = 0;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        EAX = 0x00003028;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
1715
61a8c4ec
FB
/* ENTER instruction helper for nesting level > 0 (16/32-bit stacks):
   copy 'level - 1' frame pointers from the previous frame, then push
   the new frame pointer (T1).  All accesses are masked with the SS
   stack-size mask. */
void helper_enter_level(int level, int data32)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), T1);
    }
}
1747
8f091a59
FB
1748#ifdef TARGET_X86_64
/* ENTER helper for 64-bit mode (flat addressing, no SS base/mask):
   copy 'level - 1' frame pointers, then push the new frame pointer
   (T1), using 8-byte or 2-byte slots depending on data64. */
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
1777#endif
1778
2c0262af
FB
/* LLDT helper: load the LDT register from the selector in T0.
   A null selector installs an empty LDT; otherwise the selector
   must reference a present LDT descriptor (type 2) in the GDT.
   In long mode the descriptor is 16 bytes and carries a 64-bit base. */
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* LDT selector must point into the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;   /* 16-byte system descriptor in long mode */
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            /* upper 32 bits of the 64-bit base */
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
1826
/* LTR helper: load the task register from the selector in T0.
   The selector must reference an available TSS descriptor (type 1
   or 9) in the GDT; the descriptor is then marked busy.  In long
   mode the descriptor is 16 bytes with a 64-bit base. */
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* TSS selector must point into the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;   /* 16-byte system descriptor in long mode */
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an available 286 (1) or 386 (9) TSS */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            /* upper half of a long-mode descriptor must have type 0 */
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the descriptor table */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
1882
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data/stack segment register: validate the selector against
   the GDT/LDT descriptor (#GP/#SS/#NP as appropriate), set the
   descriptor's accessed bit, and install the segment cache. */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        /* null SS is only allowed in 64-bit mode below CPL 3 */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* TI bit selects LDT vs GDT */
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* must be a code/data (non-system) descriptor */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* not present: #SS for stack loads, #NP otherwise */
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
1961
1962/* protected mode jump */
f419b321 1963void helper_ljmp_protected_T0_T1(int next_eip_addend)
2c0262af 1964{
14ce26e7 1965 int new_cs, gate_cs, type;
2c0262af 1966 uint32_t e1, e2, cpl, dpl, rpl, limit;
f419b321 1967 target_ulong new_eip, next_eip;
3b46e624 1968
2c0262af
FB
1969 new_cs = T0;
1970 new_eip = T1;
1971 if ((new_cs & 0xfffc) == 0)
1972 raise_exception_err(EXCP0D_GPF, 0);
1973 if (load_segment(&e1, &e2, new_cs) != 0)
1974 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1975 cpl = env->hflags & HF_CPL_MASK;
1976 if (e2 & DESC_S_MASK) {
1977 if (!(e2 & DESC_CS_MASK))
1978 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1979 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 1980 if (e2 & DESC_C_MASK) {
2c0262af
FB
1981 /* conforming code segment */
1982 if (dpl > cpl)
1983 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1984 } else {
1985 /* non conforming code segment */
1986 rpl = new_cs & 3;
1987 if (rpl > cpl)
1988 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1989 if (dpl != cpl)
1990 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1991 }
1992 if (!(e2 & DESC_P_MASK))
1993 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
1994 limit = get_seg_limit(e1, e2);
5fafdf24 1995 if (new_eip > limit &&
ca954f6d 1996 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2c0262af
FB
1997 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
1998 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1999 get_seg_base(e1, e2), limit, e2);
2000 EIP = new_eip;
2001 } else {
7e84c249
FB
2002 /* jump to call or task gate */
2003 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2004 rpl = new_cs & 3;
2005 cpl = env->hflags & HF_CPL_MASK;
2006 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2007 switch(type) {
2008 case 1: /* 286 TSS */
2009 case 9: /* 386 TSS */
2010 case 5: /* task gate */
2011 if (dpl < cpl || dpl < rpl)
2012 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
f419b321 2013 next_eip = env->eip + next_eip_addend;
08cea4ee 2014 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
447c2cef 2015 CC_OP = CC_OP_EFLAGS;
7e84c249
FB
2016 break;
2017 case 4: /* 286 call gate */
2018 case 12: /* 386 call gate */
2019 if ((dpl < cpl) || (dpl < rpl))
2020 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2021 if (!(e2 & DESC_P_MASK))
2022 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2023 gate_cs = e1 >> 16;
516633dc
FB
2024 new_eip = (e1 & 0xffff);
2025 if (type == 12)
2026 new_eip |= (e2 & 0xffff0000);
7e84c249
FB
2027 if (load_segment(&e1, &e2, gate_cs) != 0)
2028 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2029 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2030 /* must be code segment */
5fafdf24 2031 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
7e84c249
FB
2032 (DESC_S_MASK | DESC_CS_MASK)))
2033 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
5fafdf24 2034 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
7e84c249
FB
2035 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2036 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2037 if (!(e2 & DESC_P_MASK))
2038 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
7e84c249
FB
2039 limit = get_seg_limit(e1, e2);
2040 if (new_eip > limit)
2041 raise_exception_err(EXCP0D_GPF, 0);
2042 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2043 get_seg_base(e1, e2), limit, e2);
2044 EIP = new_eip;
2045 break;
2046 default:
2047 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2048 break;
2049 }
2c0262af
FB
2050 }
2051}
2052
2053/* real mode call */
2054void helper_lcall_real_T0_T1(int shift, int next_eip)
2055{
2056 int new_cs, new_eip;
2057 uint32_t esp, esp_mask;
14ce26e7 2058 target_ulong ssp;
2c0262af
FB
2059
2060 new_cs = T0;
2061 new_eip = T1;
2062 esp = ESP;
891b38e4 2063 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af
FB
2064 ssp = env->segs[R_SS].base;
2065 if (shift) {
891b38e4
FB
2066 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2067 PUSHL(ssp, esp, esp_mask, next_eip);
2c0262af 2068 } else {
891b38e4
FB
2069 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2070 PUSHW(ssp, esp, esp_mask, next_eip);
2c0262af
FB
2071 }
2072
8d7b0fbb 2073 SET_ESP(esp, esp_mask);
2c0262af
FB
2074 env->eip = new_eip;
2075 env->segs[R_CS].selector = new_cs;
14ce26e7 2076 env->segs[R_CS].base = (new_cs << 4);
2c0262af
FB
2077}
2078
2079/* protected mode call */
f419b321 2080void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2c0262af 2081{
649ea05a 2082 int new_cs, new_stack, i;
2c0262af 2083 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
891b38e4
FB
2084 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2085 uint32_t val, limit, old_sp_mask;
649ea05a 2086 target_ulong ssp, old_ssp, next_eip, new_eip;
3b46e624 2087
2c0262af
FB
2088 new_cs = T0;
2089 new_eip = T1;
f419b321 2090 next_eip = env->eip + next_eip_addend;
f3f2d9be 2091#ifdef DEBUG_PCALL
e19e89a5
FB
2092 if (loglevel & CPU_LOG_PCALL) {
2093 fprintf(logfile, "lcall %04x:%08x s=%d\n",
649ea05a 2094 new_cs, (uint32_t)new_eip, shift);
7fe48483 2095 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
f3f2d9be
FB
2096 }
2097#endif
2c0262af
FB
2098 if ((new_cs & 0xfffc) == 0)
2099 raise_exception_err(EXCP0D_GPF, 0);
2100 if (load_segment(&e1, &e2, new_cs) != 0)
2101 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2102 cpl = env->hflags & HF_CPL_MASK;
f3f2d9be 2103#ifdef DEBUG_PCALL
e19e89a5 2104 if (loglevel & CPU_LOG_PCALL) {
f3f2d9be
FB
2105 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2106 }
2107#endif
2c0262af
FB
2108 if (e2 & DESC_S_MASK) {
2109 if (!(e2 & DESC_CS_MASK))
2110 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2111 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 2112 if (e2 & DESC_C_MASK) {
2c0262af
FB
2113 /* conforming code segment */
2114 if (dpl > cpl)
2115 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2116 } else {
2117 /* non conforming code segment */
2118 rpl = new_cs & 3;
2119 if (rpl > cpl)
2120 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2121 if (dpl != cpl)
2122 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2123 }
2124 if (!(e2 & DESC_P_MASK))
2125 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2126
f419b321
FB
2127#ifdef TARGET_X86_64
2128 /* XXX: check 16/32 bit cases in long mode */
2129 if (shift == 2) {
2130 target_ulong rsp;
2131 /* 64 bit case */
2132 rsp = ESP;
2133 PUSHQ(rsp, env->segs[R_CS].selector);
2134 PUSHQ(rsp, next_eip);
2135 /* from this point, not restartable */
2136 ESP = rsp;
2137 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
5fafdf24 2138 get_seg_base(e1, e2),
f419b321
FB
2139 get_seg_limit(e1, e2), e2);
2140 EIP = new_eip;
5fafdf24 2141 } else
f419b321
FB
2142#endif
2143 {
2144 sp = ESP;
2145 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2146 ssp = env->segs[R_SS].base;
2147 if (shift) {
2148 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2149 PUSHL(ssp, sp, sp_mask, next_eip);
2150 } else {
2151 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2152 PUSHW(ssp, sp, sp_mask, next_eip);
2153 }
3b46e624 2154
f419b321
FB
2155 limit = get_seg_limit(e1, e2);
2156 if (new_eip > limit)
2157 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2158 /* from this point, not restartable */
8d7b0fbb 2159 SET_ESP(sp, sp_mask);
f419b321
FB
2160 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2161 get_seg_base(e1, e2), limit, e2);
2162 EIP = new_eip;
2c0262af 2163 }
2c0262af
FB
2164 } else {
2165 /* check gate type */
2166 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
7e84c249
FB
2167 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2168 rpl = new_cs & 3;
2c0262af
FB
2169 switch(type) {
2170 case 1: /* available 286 TSS */
2171 case 9: /* available 386 TSS */
2172 case 5: /* task gate */
7e84c249
FB
2173 if (dpl < cpl || dpl < rpl)
2174 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
883da8e2 2175 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
447c2cef 2176 CC_OP = CC_OP_EFLAGS;
8145122b 2177 return;
2c0262af
FB
2178 case 4: /* 286 call gate */
2179 case 12: /* 386 call gate */
2180 break;
2181 default:
2182 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2183 break;
2184 }
2185 shift = type >> 3;
2186
2c0262af
FB
2187 if (dpl < cpl || dpl < rpl)
2188 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2189 /* check valid bit */
2190 if (!(e2 & DESC_P_MASK))
2191 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2192 selector = e1 >> 16;
2193 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
f3f2d9be 2194 param_count = e2 & 0x1f;
2c0262af
FB
2195 if ((selector & 0xfffc) == 0)
2196 raise_exception_err(EXCP0D_GPF, 0);
2197
2198 if (load_segment(&e1, &e2, selector) != 0)
2199 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2200 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2201 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2202 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2203 if (dpl > cpl)
2204 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2205 if (!(e2 & DESC_P_MASK))
2206 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2207
2208 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
7f75ffd3 2209 /* to inner privilege */
2c0262af 2210 get_ss_esp_from_tss(&ss, &sp, dpl);
f3f2d9be 2211#ifdef DEBUG_PCALL
e19e89a5 2212 if (loglevel & CPU_LOG_PCALL)
5fafdf24 2213 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
f3f2d9be
FB
2214 ss, sp, param_count, ESP);
2215#endif
2c0262af
FB
2216 if ((ss & 0xfffc) == 0)
2217 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2218 if ((ss & 3) != dpl)
2219 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2220 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2221 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2222 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2223 if (ss_dpl != dpl)
2224 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2225 if (!(ss_e2 & DESC_S_MASK) ||
2226 (ss_e2 & DESC_CS_MASK) ||
2227 !(ss_e2 & DESC_W_MASK))
2228 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2229 if (!(ss_e2 & DESC_P_MASK))
2230 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3b46e624 2231
891b38e4 2232 // push_size = ((param_count * 2) + 8) << shift;
2c0262af 2233
891b38e4
FB
2234 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2235 old_ssp = env->segs[R_SS].base;
3b46e624 2236
891b38e4
FB
2237 sp_mask = get_sp_mask(ss_e2);
2238 ssp = get_seg_base(ss_e1, ss_e2);
2c0262af 2239 if (shift) {
891b38e4
FB
2240 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2241 PUSHL(ssp, sp, sp_mask, ESP);
2242 for(i = param_count - 1; i >= 0; i--) {
2243 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2244 PUSHL(ssp, sp, sp_mask, val);
2c0262af
FB
2245 }
2246 } else {
891b38e4
FB
2247 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2248 PUSHW(ssp, sp, sp_mask, ESP);
2249 for(i = param_count - 1; i >= 0; i--) {
2250 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2251 PUSHW(ssp, sp, sp_mask, val);
2c0262af
FB
2252 }
2253 }
891b38e4 2254 new_stack = 1;
2c0262af 2255 } else {
7f75ffd3 2256 /* to same privilege */
891b38e4
FB
2257 sp = ESP;
2258 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2259 ssp = env->segs[R_SS].base;
2260 // push_size = (4 << shift);
2261 new_stack = 0;
2c0262af
FB
2262 }
2263
2264 if (shift) {
891b38e4
FB
2265 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2266 PUSHL(ssp, sp, sp_mask, next_eip);
2c0262af 2267 } else {
891b38e4
FB
2268 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2269 PUSHW(ssp, sp, sp_mask, next_eip);
2270 }
2271
2272 /* from this point, not restartable */
2273
2274 if (new_stack) {
2275 ss = (ss & ~3) | dpl;
5fafdf24 2276 cpu_x86_load_seg_cache(env, R_SS, ss,
891b38e4
FB
2277 ssp,
2278 get_seg_limit(ss_e1, ss_e2),
2279 ss_e2);
2c0262af
FB
2280 }
2281
2c0262af 2282 selector = (selector & ~3) | dpl;
5fafdf24 2283 cpu_x86_load_seg_cache(env, R_CS, selector,
2c0262af
FB
2284 get_seg_base(e1, e2),
2285 get_seg_limit(e1, e2),
2286 e2);
2287 cpu_x86_set_cpl(env, dpl);
8d7b0fbb 2288 SET_ESP(sp, sp_mask);
2c0262af
FB
2289 EIP = offset;
2290 }
9df217a3
FB
2291#ifdef USE_KQEMU
2292 if (kqemu_is_ok(env)) {
2293 env->exception_index = -1;
2294 cpu_loop_exit();
2295 }
2296#endif
2c0262af
FB
2297}
2298
7e84c249 2299/* real and vm86 mode iret */
2c0262af
FB
2300void helper_iret_real(int shift)
2301{
891b38e4 2302 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
14ce26e7 2303 target_ulong ssp;
2c0262af 2304 int eflags_mask;
7e84c249 2305
891b38e4
FB
2306 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2307 sp = ESP;
2308 ssp = env->segs[R_SS].base;
2c0262af
FB
2309 if (shift == 1) {
2310 /* 32 bits */
891b38e4
FB
2311 POPL(ssp, sp, sp_mask, new_eip);
2312 POPL(ssp, sp, sp_mask, new_cs);
2313 new_cs &= 0xffff;
2314 POPL(ssp, sp, sp_mask, new_eflags);
2c0262af
FB
2315 } else {
2316 /* 16 bits */
891b38e4
FB
2317 POPW(ssp, sp, sp_mask, new_eip);
2318 POPW(ssp, sp, sp_mask, new_cs);
2319 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 2320 }
4136f33c 2321 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
2322 load_seg_vm(R_CS, new_cs);
2323 env->eip = new_eip;
7e84c249 2324 if (env->eflags & VM_MASK)
8145122b 2325 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
7e84c249 2326 else
8145122b 2327 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2c0262af
FB
2328 if (shift == 0)
2329 eflags_mask &= 0xffff;
2330 load_eflags(new_eflags, eflags_mask);
2331}
2332
8e682019
FB
2333static inline void validate_seg(int seg_reg, int cpl)
2334{
2335 int dpl;
2336 uint32_t e2;
cd072e01
FB
2337
2338 /* XXX: on x86_64, we do not want to nullify FS and GS because
2339 they may still contain a valid base. I would be interested to
2340 know how a real x86_64 CPU behaves */
5fafdf24 2341 if ((seg_reg == R_FS || seg_reg == R_GS) &&
cd072e01
FB
2342 (env->segs[seg_reg].selector & 0xfffc) == 0)
2343 return;
2344
8e682019
FB
2345 e2 = env->segs[seg_reg].flags;
2346 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2347 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2348 /* data or non conforming code segment */
2349 if (dpl < cpl) {
14ce26e7 2350 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
8e682019
FB
2351 }
2352 }
2353}
2354
2c0262af
FB
2355/* protected mode iret */
2356static inline void helper_ret_protected(int shift, int is_iret, int addend)
2357{
14ce26e7 2358 uint32_t new_cs, new_eflags, new_ss;
2c0262af
FB
2359 uint32_t new_es, new_ds, new_fs, new_gs;
2360 uint32_t e1, e2, ss_e1, ss_e2;
4136f33c 2361 int cpl, dpl, rpl, eflags_mask, iopl;
14ce26e7 2362 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3b46e624 2363
14ce26e7
FB
2364#ifdef TARGET_X86_64
2365 if (shift == 2)
2366 sp_mask = -1;
2367 else
2368#endif
2369 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af 2370 sp = ESP;
891b38e4 2371 ssp = env->segs[R_SS].base;
354ff226 2372 new_eflags = 0; /* avoid warning */
14ce26e7
FB
2373#ifdef TARGET_X86_64
2374 if (shift == 2) {
2375 POPQ(sp, new_eip);
2376 POPQ(sp, new_cs);
2377 new_cs &= 0xffff;
2378 if (is_iret) {
2379 POPQ(sp, new_eflags);
2380 }
2381 } else
2382#endif
2c0262af
FB
2383 if (shift == 1) {
2384 /* 32 bits */
891b38e4
FB
2385 POPL(ssp, sp, sp_mask, new_eip);
2386 POPL(ssp, sp, sp_mask, new_cs);
2387 new_cs &= 0xffff;
2388 if (is_iret) {
2389 POPL(ssp, sp, sp_mask, new_eflags);
2390 if (new_eflags & VM_MASK)
2391 goto return_to_vm86;
2392 }
2c0262af
FB
2393 } else {
2394 /* 16 bits */
891b38e4
FB
2395 POPW(ssp, sp, sp_mask, new_eip);
2396 POPW(ssp, sp, sp_mask, new_cs);
2c0262af 2397 if (is_iret)
891b38e4 2398 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 2399 }
891b38e4 2400#ifdef DEBUG_PCALL
e19e89a5 2401 if (loglevel & CPU_LOG_PCALL) {
14ce26e7 2402 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
e19e89a5 2403 new_cs, new_eip, shift, addend);
7fe48483 2404 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
891b38e4
FB
2405 }
2406#endif
2c0262af
FB
2407 if ((new_cs & 0xfffc) == 0)
2408 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2409 if (load_segment(&e1, &e2, new_cs) != 0)
2410 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2411 if (!(e2 & DESC_S_MASK) ||
2412 !(e2 & DESC_CS_MASK))
2413 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2414 cpl = env->hflags & HF_CPL_MASK;
5fafdf24 2415 rpl = new_cs & 3;
2c0262af
FB
2416 if (rpl < cpl)
2417 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2418 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 2419 if (e2 & DESC_C_MASK) {
2c0262af
FB
2420 if (dpl > rpl)
2421 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2422 } else {
2423 if (dpl != rpl)
2424 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2425 }
2426 if (!(e2 & DESC_P_MASK))
2427 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3b46e624 2428
891b38e4 2429 sp += addend;
5fafdf24 2430 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
ca954f6d 2431 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2c0262af 2432 /* return to same priledge level */
5fafdf24 2433 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2c0262af
FB
2434 get_seg_base(e1, e2),
2435 get_seg_limit(e1, e2),
2436 e2);
2c0262af 2437 } else {
7f75ffd3 2438 /* return to different privilege level */
14ce26e7
FB
2439#ifdef TARGET_X86_64
2440 if (shift == 2) {
2441 POPQ(sp, new_esp);
2442 POPQ(sp, new_ss);
2443 new_ss &= 0xffff;
2444 } else
2445#endif
2c0262af
FB
2446 if (shift == 1) {
2447 /* 32 bits */
891b38e4
FB
2448 POPL(ssp, sp, sp_mask, new_esp);
2449 POPL(ssp, sp, sp_mask, new_ss);
2450 new_ss &= 0xffff;
2c0262af
FB
2451 } else {
2452 /* 16 bits */
891b38e4
FB
2453 POPW(ssp, sp, sp_mask, new_esp);
2454 POPW(ssp, sp, sp_mask, new_ss);
2c0262af 2455 }
e19e89a5
FB
2456#ifdef DEBUG_PCALL
2457 if (loglevel & CPU_LOG_PCALL) {
14ce26e7 2458 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
e19e89a5
FB
2459 new_ss, new_esp);
2460 }
2461#endif
b359d4e7
FB
2462 if ((new_ss & 0xfffc) == 0) {
2463#ifdef TARGET_X86_64
2464 /* NULL ss is allowed in long mode if cpl != 3*/
d80c7d1c 2465 /* XXX: test CS64 ? */
b359d4e7 2466 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
5fafdf24 2467 cpu_x86_load_seg_cache(env, R_SS, new_ss,
b359d4e7
FB
2468 0, 0xffffffff,
2469 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2470 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2471 DESC_W_MASK | DESC_A_MASK);
d80c7d1c 2472 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
5fafdf24 2473 } else
b359d4e7
FB
2474#endif
2475 {
2476 raise_exception_err(EXCP0D_GPF, 0);
2477 }
14ce26e7
FB
2478 } else {
2479 if ((new_ss & 3) != rpl)
2480 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2481 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2482 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2483 if (!(ss_e2 & DESC_S_MASK) ||
2484 (ss_e2 & DESC_CS_MASK) ||
2485 !(ss_e2 & DESC_W_MASK))
2486 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2487 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2488 if (dpl != rpl)
2489 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2490 if (!(ss_e2 & DESC_P_MASK))
2491 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
5fafdf24 2492 cpu_x86_load_seg_cache(env, R_SS, new_ss,
14ce26e7
FB
2493 get_seg_base(ss_e1, ss_e2),
2494 get_seg_limit(ss_e1, ss_e2),
2495 ss_e2);
2496 }
2c0262af 2497
5fafdf24 2498 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2c0262af
FB
2499 get_seg_base(e1, e2),
2500 get_seg_limit(e1, e2),
2501 e2);
2c0262af 2502 cpu_x86_set_cpl(env, rpl);
891b38e4 2503 sp = new_esp;
14ce26e7 2504#ifdef TARGET_X86_64
2c8e0301 2505 if (env->hflags & HF_CS64_MASK)
14ce26e7
FB
2506 sp_mask = -1;
2507 else
2508#endif
2509 sp_mask = get_sp_mask(ss_e2);
8e682019
FB
2510
2511 /* validate data segments */
89984cd2
FB
2512 validate_seg(R_ES, rpl);
2513 validate_seg(R_DS, rpl);
2514 validate_seg(R_FS, rpl);
2515 validate_seg(R_GS, rpl);
4afa6482
FB
2516
2517 sp += addend;
2c0262af 2518 }
8d7b0fbb 2519 SET_ESP(sp, sp_mask);
2c0262af
FB
2520 env->eip = new_eip;
2521 if (is_iret) {
4136f33c 2522 /* NOTE: 'cpl' is the _old_ CPL */
8145122b 2523 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2c0262af 2524 if (cpl == 0)
4136f33c
FB
2525 eflags_mask |= IOPL_MASK;
2526 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2527 if (cpl <= iopl)
2528 eflags_mask |= IF_MASK;
2c0262af
FB
2529 if (shift == 0)
2530 eflags_mask &= 0xffff;
2531 load_eflags(new_eflags, eflags_mask);
2532 }
2533 return;
2534
2535 return_to_vm86:
891b38e4
FB
2536 POPL(ssp, sp, sp_mask, new_esp);
2537 POPL(ssp, sp, sp_mask, new_ss);
2538 POPL(ssp, sp, sp_mask, new_es);
2539 POPL(ssp, sp, sp_mask, new_ds);
2540 POPL(ssp, sp, sp_mask, new_fs);
2541 POPL(ssp, sp, sp_mask, new_gs);
3b46e624 2542
2c0262af 2543 /* modify processor state */
5fafdf24 2544 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
8145122b 2545 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
891b38e4 2546 load_seg_vm(R_CS, new_cs & 0xffff);
2c0262af 2547 cpu_x86_set_cpl(env, 3);
891b38e4
FB
2548 load_seg_vm(R_SS, new_ss & 0xffff);
2549 load_seg_vm(R_ES, new_es & 0xffff);
2550 load_seg_vm(R_DS, new_ds & 0xffff);
2551 load_seg_vm(R_FS, new_fs & 0xffff);
2552 load_seg_vm(R_GS, new_gs & 0xffff);
2c0262af 2553
fd836909 2554 env->eip = new_eip & 0xffff;
2c0262af
FB
2555 ESP = new_esp;
2556}
2557
08cea4ee 2558void helper_iret_protected(int shift, int next_eip)
2c0262af 2559{
7e84c249
FB
2560 int tss_selector, type;
2561 uint32_t e1, e2;
3b46e624 2562
7e84c249
FB
2563 /* specific case for TSS */
2564 if (env->eflags & NT_MASK) {
14ce26e7
FB
2565#ifdef TARGET_X86_64
2566 if (env->hflags & HF_LMA_MASK)
2567 raise_exception_err(EXCP0D_GPF, 0);
2568#endif
7e84c249
FB
2569 tss_selector = lduw_kernel(env->tr.base + 0);
2570 if (tss_selector & 4)
2571 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2572 if (load_segment(&e1, &e2, tss_selector) != 0)
2573 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2574 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2575 /* NOTE: we check both segment and busy TSS */
2576 if (type != 3)
2577 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
08cea4ee 2578 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
7e84c249
FB
2579 } else {
2580 helper_ret_protected(shift, 1, 0);
2581 }
9df217a3
FB
2582#ifdef USE_KQEMU
2583 if (kqemu_is_ok(env)) {
2584 CC_OP = CC_OP_EFLAGS;
2585 env->exception_index = -1;
2586 cpu_loop_exit();
2587 }
2588#endif
2c0262af
FB
2589}
2590
/* Protected-mode far return (lret / lret imm16). */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2601
023fe10d
FB
2602void helper_sysenter(void)
2603{
2604 if (env->sysenter_cs == 0) {
2605 raise_exception_err(EXCP0D_GPF, 0);
2606 }
2607 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2608 cpu_x86_set_cpl(env, 0);
5fafdf24
TS
2609 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2610 0, 0xffffffff,
023fe10d
FB
2611 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2612 DESC_S_MASK |
2613 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
5fafdf24 2614 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
14ce26e7 2615 0, 0xffffffff,
023fe10d
FB
2616 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2617 DESC_S_MASK |
2618 DESC_W_MASK | DESC_A_MASK);
2619 ESP = env->sysenter_esp;
2620 EIP = env->sysenter_eip;
2621}
2622
2623void helper_sysexit(void)
2624{
2625 int cpl;
2626
2627 cpl = env->hflags & HF_CPL_MASK;
2628 if (env->sysenter_cs == 0 || cpl != 0) {
2629 raise_exception_err(EXCP0D_GPF, 0);
2630 }
2631 cpu_x86_set_cpl(env, 3);
5fafdf24
TS
2632 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2633 0, 0xffffffff,
023fe10d
FB
2634 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2635 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2636 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
5fafdf24 2637 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
14ce26e7 2638 0, 0xffffffff,
023fe10d
FB
2639 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2640 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2641 DESC_W_MASK | DESC_A_MASK);
2642 ESP = ECX;
2643 EIP = EDX;
9df217a3
FB
2644#ifdef USE_KQEMU
2645 if (kqemu_is_ok(env)) {
2646 env->exception_index = -1;
2647 cpu_loop_exit();
2648 }
2649#endif
023fe10d
FB
2650}
2651
2c0262af
FB
2652void helper_movl_crN_T0(int reg)
2653{
5fafdf24 2654#if !defined(CONFIG_USER_ONLY)
2c0262af
FB
2655 switch(reg) {
2656 case 0:
1ac157da 2657 cpu_x86_update_cr0(env, T0);
2c0262af
FB
2658 break;
2659 case 3:
1ac157da
FB
2660 cpu_x86_update_cr3(env, T0);
2661 break;
2662 case 4:
2663 cpu_x86_update_cr4(env, T0);
2664 break;
4d6b6c0a
FB
2665 case 8:
2666 cpu_set_apic_tpr(env, T0);
2667 break;
1ac157da
FB
2668 default:
2669 env->cr[reg] = T0;
2c0262af
FB
2670 break;
2671 }
4d6b6c0a 2672#endif
2c0262af
FB
2673}
2674
2675/* XXX: do more */
2676void helper_movl_drN_T0(int reg)
2677{
2678 env->dr[reg] = T0;
2679}
2680
8f091a59 2681void helper_invlpg(target_ulong addr)
2c0262af
FB
2682{
2683 cpu_x86_flush_tlb(env, addr);
2684}
2685
2c0262af
FB
2686void helper_rdtsc(void)
2687{
2688 uint64_t val;
ecada8a2
FB
2689
2690 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2691 raise_exception(EXCP0D_GPF);
2692 }
28ab0e2e 2693 val = cpu_get_tsc(env);
14ce26e7
FB
2694 EAX = (uint32_t)(val);
2695 EDX = (uint32_t)(val >> 32);
2696}
2697
5fafdf24 2698#if defined(CONFIG_USER_ONLY)
14ce26e7
FB
2699void helper_wrmsr(void)
2700{
2c0262af
FB
2701}
2702
14ce26e7
FB
2703void helper_rdmsr(void)
2704{
2705}
2706#else
2c0262af
FB
2707void helper_wrmsr(void)
2708{
14ce26e7
FB
2709 uint64_t val;
2710
2711 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2712
2713 switch((uint32_t)ECX) {
2c0262af 2714 case MSR_IA32_SYSENTER_CS:
14ce26e7 2715 env->sysenter_cs = val & 0xffff;
2c0262af
FB
2716 break;
2717 case MSR_IA32_SYSENTER_ESP:
14ce26e7 2718 env->sysenter_esp = val;
2c0262af
FB
2719 break;
2720 case MSR_IA32_SYSENTER_EIP:
14ce26e7
FB
2721 env->sysenter_eip = val;
2722 break;
2723 case MSR_IA32_APICBASE:
2724 cpu_set_apic_base(env, val);
2725 break;
14ce26e7 2726 case MSR_EFER:
f419b321
FB
2727 {
2728 uint64_t update_mask;
2729 update_mask = 0;
2730 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2731 update_mask |= MSR_EFER_SCE;
2732 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
2733 update_mask |= MSR_EFER_LME;
2734 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
2735 update_mask |= MSR_EFER_FFXSR;
2736 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
2737 update_mask |= MSR_EFER_NXE;
5fafdf24 2738 env->efer = (env->efer & ~update_mask) |
f419b321
FB
2739 (val & update_mask);
2740 }
2c0262af 2741 break;
14ce26e7
FB
2742 case MSR_STAR:
2743 env->star = val;
2744 break;
8f091a59
FB
2745 case MSR_PAT:
2746 env->pat = val;
2747 break;
f419b321 2748#ifdef TARGET_X86_64
14ce26e7
FB
2749 case MSR_LSTAR:
2750 env->lstar = val;
2751 break;
2752 case MSR_CSTAR:
2753 env->cstar = val;
2754 break;
2755 case MSR_FMASK:
2756 env->fmask = val;
2757 break;
2758 case MSR_FSBASE:
2759 env->segs[R_FS].base = val;
2760 break;
2761 case MSR_GSBASE:
2762 env->segs[R_GS].base = val;
2763 break;
2764 case MSR_KERNELGSBASE:
2765 env->kernelgsbase = val;
2766 break;
2767#endif
2c0262af
FB
2768 default:
2769 /* XXX: exception ? */
5fafdf24 2770 break;
2c0262af
FB
2771 }
2772}
2773
2774void helper_rdmsr(void)
2775{
14ce26e7
FB
2776 uint64_t val;
2777 switch((uint32_t)ECX) {
2c0262af 2778 case MSR_IA32_SYSENTER_CS:
14ce26e7 2779 val = env->sysenter_cs;
2c0262af
FB
2780 break;
2781 case MSR_IA32_SYSENTER_ESP:
14ce26e7 2782 val = env->sysenter_esp;
2c0262af
FB
2783 break;
2784 case MSR_IA32_SYSENTER_EIP:
14ce26e7
FB
2785 val = env->sysenter_eip;
2786 break;
2787 case MSR_IA32_APICBASE:
2788 val = cpu_get_apic_base(env);
2789 break;
14ce26e7
FB
2790 case MSR_EFER:
2791 val = env->efer;
2792 break;
2793 case MSR_STAR:
2794 val = env->star;
2795 break;
8f091a59
FB
2796 case MSR_PAT:
2797 val = env->pat;
2798 break;
f419b321 2799#ifdef TARGET_X86_64
14ce26e7
FB
2800 case MSR_LSTAR:
2801 val = env->lstar;
2802 break;
2803 case MSR_CSTAR:
2804 val = env->cstar;
2805 break;
2806 case MSR_FMASK:
2807 val = env->fmask;
2808 break;
2809 case MSR_FSBASE:
2810 val = env->segs[R_FS].base;
2811 break;
2812 case MSR_GSBASE:
2813 val = env->segs[R_GS].base;
2c0262af 2814 break;
14ce26e7
FB
2815 case MSR_KERNELGSBASE:
2816 val = env->kernelgsbase;
2817 break;
2818#endif
2c0262af
FB
2819 default:
2820 /* XXX: exception ? */
14ce26e7 2821 val = 0;
5fafdf24 2822 break;
2c0262af 2823 }
14ce26e7
FB
2824 EAX = (uint32_t)(val);
2825 EDX = (uint32_t)(val >> 32);
2c0262af 2826}
14ce26e7 2827#endif
2c0262af
FB
2828
2829void helper_lsl(void)
2830{
2831 unsigned int selector, limit;
5516d670 2832 uint32_t e1, e2, eflags;
3ab493de 2833 int rpl, dpl, cpl, type;
2c0262af 2834
5516d670 2835 eflags = cc_table[CC_OP].compute_all();
2c0262af
FB
2836 selector = T0 & 0xffff;
2837 if (load_segment(&e1, &e2, selector) != 0)
5516d670 2838 goto fail;
3ab493de
FB
2839 rpl = selector & 3;
2840 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2841 cpl = env->hflags & HF_CPL_MASK;
2842 if (e2 & DESC_S_MASK) {
2843 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2844 /* conforming */
2845 } else {
2846 if (dpl < cpl || dpl < rpl)
5516d670 2847 goto fail;
3ab493de
FB
2848 }
2849 } else {
2850 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2851 switch(type) {
2852 case 1:
2853 case 2:
2854 case 3:
2855 case 9:
2856 case 11:
2857 break;
2858 default:
5516d670 2859 goto fail;
3ab493de 2860 }
5516d670
FB
2861 if (dpl < cpl || dpl < rpl) {
2862 fail:
2863 CC_SRC = eflags & ~CC_Z;
3ab493de 2864 return;
5516d670 2865 }
3ab493de
FB
2866 }
2867 limit = get_seg_limit(e1, e2);
2c0262af 2868 T1 = limit;
5516d670 2869 CC_SRC = eflags | CC_Z;
2c0262af
FB
2870}
2871
2872void helper_lar(void)
2873{
2874 unsigned int selector;
5516d670 2875 uint32_t e1, e2, eflags;
3ab493de 2876 int rpl, dpl, cpl, type;
2c0262af 2877
5516d670 2878 eflags = cc_table[CC_OP].compute_all();
2c0262af 2879 selector = T0 & 0xffff;
3ab493de 2880 if ((selector & 0xfffc) == 0)
5516d670 2881 goto fail;
2c0262af 2882 if (load_segment(&e1, &e2, selector) != 0)
5516d670 2883 goto fail;
3ab493de
FB
2884 rpl = selector & 3;
2885 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2886 cpl = env->hflags & HF_CPL_MASK;
2887 if (e2 & DESC_S_MASK) {
2888 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2889 /* conforming */
2890 } else {
2891 if (dpl < cpl || dpl < rpl)
5516d670 2892 goto fail;
3ab493de
FB
2893 }
2894 } else {
2895 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2896 switch(type) {
2897 case 1:
2898 case 2:
2899 case 3:
2900 case 4:
2901 case 5:
2902 case 9:
2903 case 11:
2904 case 12:
2905 break;
2906 default:
5516d670 2907 goto fail;
3ab493de 2908 }
5516d670
FB
2909 if (dpl < cpl || dpl < rpl) {
2910 fail:
2911 CC_SRC = eflags & ~CC_Z;
3ab493de 2912 return;
5516d670 2913 }
3ab493de 2914 }
2c0262af 2915 T1 = e2 & 0x00f0ff00;
5516d670 2916 CC_SRC = eflags | CC_Z;
2c0262af
FB
2917}
2918
3ab493de
FB
2919void helper_verr(void)
2920{
2921 unsigned int selector;
5516d670 2922 uint32_t e1, e2, eflags;
3ab493de
FB
2923 int rpl, dpl, cpl;
2924
5516d670 2925 eflags = cc_table[CC_OP].compute_all();
3ab493de
FB
2926 selector = T0 & 0xffff;
2927 if ((selector & 0xfffc) == 0)
5516d670 2928 goto fail;
3ab493de 2929 if (load_segment(&e1, &e2, selector) != 0)
5516d670 2930 goto fail;
3ab493de 2931 if (!(e2 & DESC_S_MASK))
5516d670 2932 goto fail;
3ab493de
FB
2933 rpl = selector & 3;
2934 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2935 cpl = env->hflags & HF_CPL_MASK;
2936 if (e2 & DESC_CS_MASK) {
2937 if (!(e2 & DESC_R_MASK))
5516d670 2938 goto fail;
3ab493de
FB
2939 if (!(e2 & DESC_C_MASK)) {
2940 if (dpl < cpl || dpl < rpl)
5516d670 2941 goto fail;
3ab493de
FB
2942 }
2943 } else {
5516d670
FB
2944 if (dpl < cpl || dpl < rpl) {
2945 fail:
2946 CC_SRC = eflags & ~CC_Z;
3ab493de 2947 return;
5516d670 2948 }
3ab493de 2949 }
5516d670 2950 CC_SRC = eflags | CC_Z;
3ab493de
FB
2951}
2952
2953void helper_verw(void)
2954{
2955 unsigned int selector;
5516d670 2956 uint32_t e1, e2, eflags;
3ab493de
FB
2957 int rpl, dpl, cpl;
2958
5516d670 2959 eflags = cc_table[CC_OP].compute_all();
3ab493de
FB
2960 selector = T0 & 0xffff;
2961 if ((selector & 0xfffc) == 0)
5516d670 2962 goto fail;
3ab493de 2963 if (load_segment(&e1, &e2, selector) != 0)
5516d670 2964 goto fail;
3ab493de 2965 if (!(e2 & DESC_S_MASK))
5516d670 2966 goto fail;
3ab493de
FB
2967 rpl = selector & 3;
2968 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2969 cpl = env->hflags & HF_CPL_MASK;
2970 if (e2 & DESC_CS_MASK) {
5516d670 2971 goto fail;
3ab493de
FB
2972 } else {
2973 if (dpl < cpl || dpl < rpl)
5516d670
FB
2974 goto fail;
2975 if (!(e2 & DESC_W_MASK)) {
2976 fail:
2977 CC_SRC = eflags & ~CC_Z;
3ab493de 2978 return;
5516d670 2979 }
3ab493de 2980 }
5516d670 2981 CC_SRC = eflags | CC_Z;
3ab493de
FB
2982}
2983
2c0262af
FB
2984/* FPU helpers */
2985
2c0262af
FB
2986void helper_fldt_ST0_A0(void)
2987{
2988 int new_fpstt;
2989 new_fpstt = (env->fpstt - 1) & 7;
664e0f19 2990 env->fpregs[new_fpstt].d = helper_fldt(A0);
2c0262af
FB
2991 env->fpstt = new_fpstt;
2992 env->fptags[new_fpstt] = 0; /* validate stack entry */
2993}
2994
2995void helper_fstt_ST0_A0(void)
2996{
14ce26e7 2997 helper_fstt(ST0, A0);
2c0262af 2998}
2c0262af 2999
2ee73ac3
FB
3000void fpu_set_exception(int mask)
3001{
3002 env->fpus |= mask;
3003 if (env->fpus & (~env->fpuc & FPUC_EM))
3004 env->fpus |= FPUS_SE | FPUS_B;
3005}
3006
3007CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3008{
5fafdf24 3009 if (b == 0.0)
2ee73ac3
FB
3010 fpu_set_exception(FPUS_ZE);
3011 return a / b;
3012}
3013
void fpu_raise_exception(void)
{
    /* Deliver a pending FPU exception: as #MF when CR0.NE is set, otherwise
       (system emulation only) by asserting the external FERR signal. */
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        /* legacy reporting path through the chipset */
        cpu_set_ferr(env);
    }
#endif
}
3025
2c0262af
FB
3026/* BCD ops */
3027
2c0262af
FB
3028void helper_fbld_ST0_A0(void)
3029{
3030 CPU86_LDouble tmp;
3031 uint64_t val;
3032 unsigned int v;
3033 int i;
3034
3035 val = 0;
3036 for(i = 8; i >= 0; i--) {
14ce26e7 3037 v = ldub(A0 + i);
2c0262af
FB
3038 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3039 }
3040 tmp = val;
14ce26e7 3041 if (ldub(A0 + 9) & 0x80)
2c0262af
FB
3042 tmp = -tmp;
3043 fpush();
3044 ST0 = tmp;
3045}
3046
3047void helper_fbst_ST0_A0(void)
3048{
2c0262af 3049 int v;
14ce26e7 3050 target_ulong mem_ref, mem_end;
2c0262af
FB
3051 int64_t val;
3052
7a0e1f41 3053 val = floatx_to_int64(ST0, &env->fp_status);
14ce26e7 3054 mem_ref = A0;
2c0262af
FB
3055 mem_end = mem_ref + 9;
3056 if (val < 0) {
3057 stb(mem_end, 0x80);
3058 val = -val;
3059 } else {
3060 stb(mem_end, 0x00);
3061 }
3062 while (mem_ref < mem_end) {
3063 if (val == 0)
3064 break;
3065 v = val % 100;
3066 val = val / 100;
3067 v = ((v / 10) << 4) | (v % 10);
3068 stb(mem_ref++, v);
3069 }
3070 while (mem_ref < mem_end) {
3071 stb(mem_ref++, 0);
3072 }
3073}
3074
void helper_f2xm1(void)
{
    /* F2XM1: ST0 = 2^ST0 - 1.
       NOTE(review): computed via pow(), which cancels badly for ST0 near 0
       (the case F2XM1 is designed for) -- candidate for an expm1-based form. */
    ST0 = pow(2.0,ST0) - 1.0;
}
3079
3080void helper_fyl2x(void)
3081{
3082 CPU86_LDouble fptemp;
3b46e624 3083
2c0262af
FB
3084 fptemp = ST0;
3085 if (fptemp>0.0){
3086 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3087 ST1 *= fptemp;
3088 fpop();
5fafdf24 3089 } else {
2c0262af
FB
3090 env->fpus &= (~0x4700);
3091 env->fpus |= 0x400;
3092 }
3093}
3094
3095void helper_fptan(void)
3096{
3097 CPU86_LDouble fptemp;
3098
3099 fptemp = ST0;
3100 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3101 env->fpus |= 0x400;
3102 } else {
3103 ST0 = tan(fptemp);
3104 fpush();
3105 ST0 = 1.0;
3106 env->fpus &= (~0x400); /* C2 <-- 0 */
3107 /* the above code is for |arg| < 2**52 only */
3108 }
3109}
3110
3111void helper_fpatan(void)
3112{
3113 CPU86_LDouble fptemp, fpsrcop;
3114
3115 fpsrcop = ST1;
3116 fptemp = ST0;
3117 ST1 = atan2(fpsrcop,fptemp);
3118 fpop();
3119}
3120
3121void helper_fxtract(void)
3122{
3123 CPU86_LDoubleU temp;
3124 unsigned int expdif;
3125
3126 temp.d = ST0;
3127 expdif = EXPD(temp) - EXPBIAS;
3128 /*DP exponent bias*/
3129 ST0 = expdif;
3130 fpush();
3131 BIASEXPONENT(temp);
3132 ST0 = temp.d;
3133}
3134
void helper_fprem1(void)
{
    /* FPREM1: IEEE partial remainder of ST0 by ST1 (round-to-nearest
       quotient).  C2 is set when reduction is incomplete; on completion
       (C0,C3,C1) receive the low three quotient bits. */
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce NaN and clear the condition bits */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        /* exponents close enough: reduction completes in one step */
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* exponents too far apart: do a partial reduction and flag C2 */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
3188
void helper_fprem(void)
{
    /* FPREM: partial remainder of ST0 by ST1 with a truncated (chop)
       quotient, unlike FPREM1's round-to-nearest.  C2 is set when the
       reduction is incomplete; on completion (C0,C3,C1) receive the
       low three quotient bits. */
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce NaN and clear the condition bits */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        /* exponents close enough: reduction completes in one step */
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* exponents too far apart: do a partial reduction and flag C2 */
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
3243
3244void helper_fyl2xp1(void)
3245{
3246 CPU86_LDouble fptemp;
3247
3248 fptemp = ST0;
3249 if ((fptemp+1.0)>0.0) {
3250 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3251 ST1 *= fptemp;
3252 fpop();
5fafdf24 3253 } else {
2c0262af
FB
3254 env->fpus &= (~0x4700);
3255 env->fpus |= 0x400;
3256 }
3257}
3258
3259void helper_fsqrt(void)
3260{
3261 CPU86_LDouble fptemp;
3262
3263 fptemp = ST0;
5fafdf24 3264 if (fptemp<0.0) {
2c0262af
FB
3265 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3266 env->fpus |= 0x400;
3267 }
3268 ST0 = sqrt(fptemp);
3269}
3270
3271void helper_fsincos(void)
3272{
3273 CPU86_LDouble fptemp;
3274
3275 fptemp = ST0;
3276 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3277 env->fpus |= 0x400;
3278 } else {
3279 ST0 = sin(fptemp);
3280 fpush();
3281 ST0 = cos(fptemp);
3282 env->fpus &= (~0x400); /* C2 <-- 0 */
3283 /* the above code is for |arg| < 2**63 only */
3284 }
3285}
3286
void helper_frndint(void)
{
    /* FRNDINT: round ST0 to an integer using the rounding mode held in
       env->fp_status (kept in sync with FPUC by update_fp_status()). */
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}
3291
void helper_fscale(void)
{
    /* FSCALE: ST0 = ST0 * 2^trunc(ST1); the (int) cast truncates the
       scale factor toward zero. */
    ST0 = ldexp (ST0, (int)(ST1));
}
3296
3297void helper_fsin(void)
3298{
3299 CPU86_LDouble fptemp;
3300
3301 fptemp = ST0;
3302 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3303 env->fpus |= 0x400;
3304 } else {
3305 ST0 = sin(fptemp);
3306 env->fpus &= (~0x400); /* C2 <-- 0 */
3307 /* the above code is for |arg| < 2**53 only */
3308 }
3309}
3310
void helper_fcos(void)
{
    /* FCOS: compute cos(ST0) in place; out-of-range arguments only set C2
       so software can reduce the argument itself. */
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
3324
void helper_fxam_ST0(void)
{
    /* FXAM: classify ST0 into the C3,C2,C1,C0 condition bits
       (sign, zero, denormal, normal, infinity, NaN). */
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 (sign) */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN, told apart by the mantissa */
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        /* zero exponent: zero or denormal, told apart by the mantissa */
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400; /* normal finite number */
    }
}
3356
void helper_fstenv(target_ulong ptr, int data32)
{
    /* Store the FPU environment (control, status and tag words plus zeroed
       instruction/operand pointers) at ptr, using the 32-bit layout when
       data32 is set, else the 16-bit layout. */
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    /* fold the top-of-stack index into bits 13..11 of the status word */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3; /* empty register */
        } else {
            /* recompute the 2-bit tag from the register contents */
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
3406
14ce26e7 3407void helper_fldenv(target_ulong ptr, int data32)
2c0262af
FB
3408{
3409 int i, fpus, fptag;
3410
3411 if (data32) {
3412 env->fpuc = lduw(ptr);
3413 fpus = lduw(ptr + 4);
3414 fptag = lduw(ptr + 8);
3415 }
3416 else {
3417 env->fpuc = lduw(ptr);
3418 fpus = lduw(ptr + 2);
3419 fptag = lduw(ptr + 4);
3420 }
3421 env->fpstt = (fpus >> 11) & 7;
3422 env->fpus = fpus & ~0x3800;
2edcdce3 3423 for(i = 0;i < 8; i++) {
2c0262af
FB
3424 env->fptags[i] = ((fptag & 3) == 3);
3425 fptag >>= 2;
3426 }
3427}
3428
14ce26e7 3429void helper_fsave(target_ulong ptr, int data32)
2c0262af
FB
3430{
3431 CPU86_LDouble tmp;
3432 int i;
3433
3434 helper_fstenv(ptr, data32);
3435
3436 ptr += (14 << data32);
3437 for(i = 0;i < 8; i++) {
3438 tmp = ST(i);
2c0262af 3439 helper_fstt(tmp, ptr);
2c0262af
FB
3440 ptr += 10;
3441 }
3442
3443 /* fninit */
3444 env->fpus = 0;
3445 env->fpstt = 0;
3446 env->fpuc = 0x37f;
3447 env->fptags[0] = 1;
3448 env->fptags[1] = 1;
3449 env->fptags[2] = 1;
3450 env->fptags[3] = 1;
3451 env->fptags[4] = 1;
3452 env->fptags[5] = 1;
3453 env->fptags[6] = 1;
3454 env->fptags[7] = 1;
3455}
3456
14ce26e7 3457void helper_frstor(target_ulong ptr, int data32)
2c0262af
FB
3458{
3459 CPU86_LDouble tmp;
3460 int i;
3461
3462 helper_fldenv(ptr, data32);
3463 ptr += (14 << data32);
3464
3465 for(i = 0;i < 8; i++) {
2c0262af 3466 tmp = helper_fldt(ptr);
2c0262af
FB
3467 ST(i) = tmp;
3468 ptr += 10;
3469 }
3470}
3471
14ce26e7
FB
void helper_fxsave(target_ulong ptr, int data64)
{
    /* FXSAVE: store FPU/SSE state in the FXSAVE memory layout: x87
       registers in 16-byte slots from +0x20, XMM registers from +0xa0
       (16 of them when data64 is set, else 8). */
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* fold the top-of-stack index into bits 13..11 of the status word */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    /* FXSAVE's abridged tag word is inverted vs. env->fptags (1 = empty) */
    stw(ptr + 4, fptag ^ 0xff);

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16; /* each 80-bit register occupies a 16-byte slot */
    }

    /* SSE state is only saved when the OS has enabled FXSR */
    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}
3507
void helper_fxrstor(target_ulong ptr, int data64)
{
    /* FXRSTOR: reload FPU/SSE state from the FXSAVE memory layout
       written by helper_fxsave(). */
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7; /* top-of-stack from the status word */
    env->fpus = fpus & ~0x3800;
    /* the saved abridged tag word is inverted vs. env->fptags (1 = empty) */
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16; /* each 80-bit register occupies a 16-byte slot */
    }

    /* SSE state is only restored when the OS has enabled FXSR */
    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}
1f1af9fd
FB
3544
3545#ifndef USE_X86LDOUBLE
3546
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    /* Soft-double build: expand the host double 'f' into 80-bit
       extended-precision parts: *pmant = 64-bit mantissa with the integer
       bit made explicit, *pexp = sign bit + 15-bit rebiased exponent. */
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63); /* set explicit integer bit */
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383; /* rebias from double to extended */
    e |= SIGND(temp) >> 16; /* sign lands in bit 15 of the 16-bit result */
    *pexp = e;
}
3560
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    /* Soft-double build: pack 80-bit extended parts (mantissa + the
       sign/exponent word 'upper') back into a host double. */
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1); /* drop the explicit integer bit */
#ifdef __arm__
    /* NOTE(review): built word by word here -- presumably for the old ARM
       FPA mixed-endian double layout; confirm before touching */
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}
3579
3580#else
3581
3582void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3583{
3584 CPU86_LDoubleU temp;
3585
3586 temp.d = f;
3587 *pmant = temp.l.lower;
3588 *pexp = temp.l.upper;
3589}
3590
3591CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3592{
3593 CPU86_LDoubleU temp;
3594
3595 temp.l.upper = upper;
3596 temp.l.lower = mant;
3597 return temp.d;
3598}
3599#endif
3600
14ce26e7
FB
3601#ifdef TARGET_X86_64
3602
3603//#define DEBUG_MULDIV
3604
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    /* 128-bit addition: (*phigh:*plow) += (b:a). */
    uint64_t old_low = *plow;

    *plow = old_low + a;
    if (*plow < old_low) {
        (*phigh)++; /* carry out of the low word */
    }
    *phigh += b;
}
3613
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    /* Two's-complement negation of the 128-bit value in (*phigh:*plow):
       invert both words and add one, with the +1 carrying into the high
       word exactly when the low word is zero. */
    uint64_t low = *plow;

    *phigh = ~*phigh + (low == 0);
    *plow = (uint64_t)0 - low;
}
3620
45bbbb46
FB
/* return TRUE if overflow */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    /* Unsigned 128/64 division of (*phigh:*plow) by b; on success
       *plow receives the quotient and *phigh the remainder. */
    uint64_t q, r, a1, a0;
    int i, qb, ab;

    a0 = *plow;
    a1 = *phigh;
    if (a1 == 0) {
        /* dividend fits in 64 bits: use native division */
        q = a0 / b;
        r = a0 % b;
        *plow = q;
        *phigh = r;
    } else {
        if (a1 >= b)
            return 1; /* quotient would not fit in 64 bits */
        /* XXX: use a better algorithm */
        /* bit-serial shift-and-subtract long division, one bit per step */
        for(i = 0; i < 64; i++) {
            ab = a1 >> 63; /* bit about to be shifted out of the high word */
            a1 = (a1 << 1) | (a0 >> 63);
            if (ab || a1 >= b) {
                a1 -= b;
                qb = 1;
            } else {
                qb = 0;
            }
            /* quotient bits accumulate in a0 as the dividend shifts out */
            a0 = (a0 << 1) | qb;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, a0, a1);
#endif
        *plow = a0;  /* quotient */
        *phigh = a1; /* remainder */
    }
    return 0;
}
3658
45bbbb46
FB
/* return TRUE if overflow */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    /* Signed 128/64 division built on div64(): divide the magnitudes,
       then restore the signs and check the quotient fits in int64. */
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh); /* take |dividend| */
    sb = (b < 0);
    if (sb)
        b = -b; /* take |divisor| */
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        /* negative quotient: magnitude may reach 2^63 (INT64_MIN) */
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        /* positive quotient: magnitude must stay below 2^63 */
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh; /* remainder takes the dividend's sign */
    return 0;
}
3683
3684void helper_mulq_EAX_T0(void)
3685{
3686 uint64_t r0, r1;
3687
69d35728 3688 mulu64(&r1, &r0, EAX, T0);
14ce26e7
FB
3689 EAX = r0;
3690 EDX = r1;
3691 CC_DST = r0;
3692 CC_SRC = r1;
3693}
3694
3695void helper_imulq_EAX_T0(void)
3696{
3697 uint64_t r0, r1;
3698
69d35728 3699 muls64(&r1, &r0, EAX, T0);
14ce26e7
FB
3700 EAX = r0;
3701 EDX = r1;
3702 CC_DST = r0;
a8ede8ba 3703 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
14ce26e7
FB
3704}
3705
3706void helper_imulq_T0_T1(void)
3707{
3708 uint64_t r0, r1;
3709
69d35728 3710 muls64(&r1, &r0, T0, T1);
14ce26e7
FB
3711 T0 = r0;
3712 CC_DST = r0;
3713 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3714}
3715
3716void helper_divq_EAX_T0(void)
3717{
3718 uint64_t r0, r1;
3719 if (T0 == 0) {
3720 raise_exception(EXCP00_DIVZ);
3721 }
3722 r0 = EAX;
3723 r1 = EDX;
45bbbb46
FB
3724 if (div64(&r0, &r1, T0))
3725 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
3726 EAX = r0;
3727 EDX = r1;
3728}
3729
3730void helper_idivq_EAX_T0(void)
3731{
3732 uint64_t r0, r1;
3733 if (T0 == 0) {
3734 raise_exception(EXCP00_DIVZ);
3735 }
3736 r0 = EAX;
3737 r1 = EDX;
45bbbb46
FB
3738 if (idiv64(&r0, &r1, T0))
3739 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
3740 EAX = r0;
3741 EDX = r1;
3742}
3743
68cae3d8
FB
void helper_bswapq_T0(void)
{
    /* 64-bit BSWAP: reverse the byte order of T0. */
    T0 = bswap64(T0);
}
14ce26e7
FB
3748#endif
3749
3d7374c5
FB
void helper_hlt(void)
{
    /* HLT: mark the virtual CPU halted and leave the execution loop. */
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}
3757
void helper_monitor(void)
{
    /* MONITOR: only ECX == 0 (no extensions) is accepted; anything else
       raises #GP. */
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
}
3764
void helper_mwait(void)
{
    /* MWAIT: as with MONITOR, only ECX == 0 is accepted. */
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        /* single CPU: treat MWAIT as HLT */
        helper_hlt();
    }
}
3777
664e0f19
FB
3778float approx_rsqrt(float a)
3779{
3780 return 1.0 / sqrt(a);
3781}
3782
float approx_rcp(float a)
{
    /* Reciprocal (RCPSS/RCPPS), evaluated in double precision rather
       than with a hardware-style approximation. */
    const double one = 1.0;

    return one / a;
}
3787
void update_fp_status(void)
{
    /* Propagate the guest FPU control word (env->fpuc) into the softfloat
       status: rounding mode and, on FLOATX80 builds, rounding precision. */
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    /* precision control field: bits 9..8 of the control word */
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32; /* single precision */
        break;
    case 2:
        rnd_type = 64; /* double precision */
        break;
    case 3:
    default:
        rnd_type = 80; /* extended precision */
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
664e0f19 3825
5fafdf24 3826#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3827
3828#define MMUSUFFIX _mmu
3829#define GETPC() (__builtin_return_address(0))
3830
2c0262af
FB
3831#define SHIFT 0
3832#include "softmmu_template.h"
3833
3834#define SHIFT 1
3835#include "softmmu_template.h"
3836
3837#define SHIFT 2
3838#include "softmmu_template.h"
3839
3840#define SHIFT 3
3841#include "softmmu_template.h"
3842
61382a50
FB
3843#endif
3844
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* with a return address the CPU state was restored above, so the
           normal raise path applies; otherwise use the no-restore variant */
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    env = saved_env;
}