]> git.proxmox.com Git - qemu.git/blame - target-i386/helper.c
BSR/BSF TCG conversion
[qemu.git] / target-i386 / helper.c
CommitLineData
2c0262af
FB
1/*
2 * i386 helpers
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
b8b6a50b 20#define CPU_NO_GLOBAL_REGS
2c0262af 21#include "exec.h"
7a51ad82 22#include "host-utils.h"
2c0262af 23
f3f2d9be
FB
24//#define DEBUG_PCALL
25
8145122b
FB
26#if 0
27#define raise_exception_err(a, b)\
28do {\
9540a78b
FB
29 if (logfile)\
30 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
8145122b
FB
31 (raise_exception_err)(a, b);\
32} while (0)
33#endif
34
/* Parity-flag lookup table: entry i is CC_P when i contains an even
   number of set bits, 0 otherwise (the x86 PF flag reflects the parity
   of the low 8 bits of a result). */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
69
/* modulo 17 table */
/* Rotate-count reduction for 16-bit RCL/RCR: the effective count is
   taken modulo 17 (16 data bits plus the carry flag). */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
77
/* modulo 9 table */
/* Rotate-count reduction for 8-bit RCL/RCR: the effective count is
   taken modulo 9 (8 data bits plus the carry flag). */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
85
/* Floating-point constant table: 0, 1, pi, log10(2), ln(2), log2(e),
   log2(10) — presumably backing the x87 constant-load instructions
   (FLDZ/FLD1/FLDPI/...); confirm against the translator's users. */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
3b46e624 96
/* broken thread support */

/* Single global spinlock serializing guest LOCK-prefixed instructions. */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

/* Acquire the global lock (emitted around LOCK-prefixed ops). */
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

/* Release the global lock. */
void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
110
/* return non zero if error */
/* Fetch the two 32-bit words of the descriptor addressed by 'selector'
   into *e1_ptr/*e2_ptr. Selector bit 2 chooses LDT vs GDT; returns -1
   if the descriptor lies beyond the table limit, 0 on success. */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;   /* descriptor byte offset (selector minus RPL/TI bits) */
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
3b46e624 131
7e84c249
FB
132static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
133{
134 unsigned int limit;
135 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
136 if (e2 & DESC_G_MASK)
137 limit = (limit << 12) | 0xfff;
138 return limit;
139}
140
/* Reassemble a descriptor's 32-bit base address: bits 15..0 live in
   the high word of e1, bits 23..16 in the low byte of e2, and bits
   31..24 in the top byte of e2. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;
    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
145
/* Fill a SegmentCache directly from raw descriptor words e1/e2
   (base, limit, and flags taken verbatim from e2), with no checks. */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
152
/* init the segment cache in vm86 mode. */
/* In vm86 mode a segment register is just a paragraph number: base is
   selector*16, limit is 64K-1, no descriptor flags. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
160
/* Read the ring-'dpl' stack pointer pair (SS:ESP) out of the current
   TSS. The TR descriptor type selects the 16-bit vs 32-bit TSS layout
   (shift == 0 vs 1); raises #TS if the slot lies beyond the TSS limit. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;                 /* 0 = 16-bit TSS, 1 = 32-bit TSS */
    index = (dpl * 4 + 2) << shift;    /* offset of the ring-dpl SS:SP slot */
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
195
/* XXX: merge with load_seg() */
/* Load one segment register during a task switch, performing the
   per-register validity checks (CS/SS/data rules below); any failure
   raises #TS with the faulting selector. A null selector is accepted
   for every register except CS and SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
245
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by tss_selector
   (descriptor words e1/e2). 'source' tells how we got here (JMP, CALL
   or IRET), which controls the busy-bit and NT-flag handling; next_eip
   is the return EIP saved into the outgoing TSS. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* type bit 3 distinguishes 32-bit (limit >= 103) from 16-bit
       (limit >= 43) TSS layouts */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* record a back link to the outgoing task and flag nesting */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
7e84c249
FB
486
/* check if Port I/O is allowed in TSS */
/* Consult the TSS I/O permission bitmap for a 'size'-byte access at
   port 'addr'; raises #GP(0) if the TSS is not a valid 32-bit TSS or
   any covered bitmap bit is set. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);  /* I/O map base field */
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
511
/* Check the TSS I/O bitmap for a 1-byte access to port t0 (raises #GP). */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
516
/* Check the TSS I/O bitmap for a 2-byte access to port t0 (raises #GP). */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
521
/* Check the TSS I/O bitmap for a 4-byte access to port t0 (raises #GP). */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
526
/* Write the low byte of 'data' to I/O port 'port'. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}
531
/* Read one byte from I/O port 'port'. */
target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}
536
/* Write the low 16 bits of 'data' to I/O port 'port'. */
void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}
541
/* Read a 16-bit word from I/O port 'port'. */
target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}
546
/* Write the 32-bit value 'data' to I/O port 'port'. */
void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}
551
/* Read a 32-bit value from I/O port 'port'. */
target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
556
891b38e4
FB
557static inline unsigned int get_sp_mask(unsigned int e2)
558{
559 if (e2 & DESC_B_MASK)
560 return 0xffffffff;
561 else
562 return 0xffff;
563}
564
8d7b0fbb
FB
#ifdef TARGET_X86_64
/* Store 'val' into ESP/RSP honoring the stack-size mask: a 16-bit
   stack writes only SP, a 32-bit stack zero-extends into RSP, and any
   other mask (64-bit) stores the full value. */
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
578
891b38e4
FB
/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop helpers: 'ssp' is the SS base, 'sp' the offset being
   updated, 'sp_mask' the 16/32-bit wrap mask applied on each access. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
603
2c0262af
FB
/* protected mode interrupt */
/* Deliver interrupt/exception 'intno' in protected mode: walk the IDT
   entry, dispatch through a task gate (task switch) or an interrupt/
   trap gate, optionally switching to the inner-privilege stack from
   the TSS, and push the return frame (+error code when applicable).
   is_int: software INT; is_hw: external hardware interrupt;
   next_eip: return address for software interrupts. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    /* SVM: a next_eip of -1 marks an injected event; skip the
       exception-intercept check in that case */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* these exception vectors push an error code */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;   /* 0 = 286 (16-bit) gate, 1 = 386 (32-bit) gate */

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* leaving vm86: data segments are cleared */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
819
14ce26e7
FB
820#ifdef TARGET_X86_64
821
/* 64-bit stack push/pop: long mode ignores the SS base and mask. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

/* Read an RSP entry from the 64-bit TSS: 'level' 1..3 selects RSP0-2,
   4..7 selects IST1-4 (callers pass ist + 3); raises #TS if the slot
   is beyond the TSS limit. */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
850
/* 64 bit interrupt */
/* Deliver interrupt/exception 'intno' in long mode: 16-byte IDT
   entries, 64-bit gate offsets, optional IST stack selection, and an
   unconditional SS:RSP/EFLAGS/CS:RIP frame pushed on a 16-byte
   aligned stack. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    /* SVM: a next_eip of -1 marks an injected event; skip the
       exception-intercept check in that case */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* these exception vectors push an error code */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    /* target must be a 64-bit code segment */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;   /* null selector with the new RPL */
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
f419b321 988#endif
14ce26e7 989
d2fd1af7
FB
990#if defined(CONFIG_USER_ONLY)
/* SYSCALL in user-mode emulation: the guest system call cannot be run on
   real CPU state, so hand it to the main emulation loop as a pseudo
   exception. next_eip_addend is the length of the SYSCALL insn, used to
   compute the return address. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    /* address of the instruction following SYSCALL */
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
997#else
/* SYSCALL (system emulation): fast transition to CPL 0 using the
   MSR_STAR/LSTAR/CSTAR/FMASK register family. Raises #UD if EFER.SCE
   is clear. next_eip_addend is the length of the SYSCALL instruction. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    /* kernel CS selector comes from STAR[47:32] */
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* long mode: return RIP in RCX, RFLAGS in R11 */
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        /* flat 64-bit code segment */
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        /* SS is CS + 8, flat writable data segment */
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* clear the RFLAGS bits selected by MSR_FMASK */
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        /* 64-bit caller enters at LSTAR, compatibility-mode caller at CSTAR */
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        /* legacy mode: return EIP in ECX, target EIP is STAR[31:0] */
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
d2fd1af7 1052#endif
14ce26e7
FB
1053
/* SYSRET: return from a SYSCALL handler to CPL 3. dflag is the operand
   size (2 = 64-bit). Raises #UD if EFER.SCE is clear, #GP(0) if not in
   protected mode at CPL 0. User CS base selector comes from STAR[63:48]. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* 64-bit return: CS = STAR[63:48] + 16, RPL forced to 3 */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* 32-bit compatibility-mode return */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* restore RFLAGS from R11 (as saved by SYSCALL) */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        /* legacy mode return */
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* legacy SYSRET just re-enables interrupts */
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
14ce26e7 1118
2c0262af
FB
/* Real mode interrupt delivery: fetch the 4-byte IVT vector, push
   FLAGS/CS/IP (16-bit) on the stack and jump to the handler.
   next_eip is the address after the INT instruction when is_int. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    /* next_eip == -1 marks a fault re-raised from the SVM intercept path;
       avoid checking the intercept again */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1165
/* Fake user mode interrupt: only performs the privilege check a real CPU
   would do for a software INT; actual delivery is left to the host since
   we emulate user space only. */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    /* IDT entries are 16 bytes in long mode, 8 bytes otherwise */
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
1196
1197/*
e19e89a5 1198 * Begin execution of an interruption. is_int is TRUE if coming from
2c0262af 1199 * the int instruction. next_eip is the EIP value AFTER the interrupt
3b46e624 1200 * instruction. It is only relevant if is_int is TRUE.
2c0262af 1201 */
5fafdf24 1202void do_interrupt(int intno, int is_int, int error_code,
14ce26e7 1203 target_ulong next_eip, int is_hw)
2c0262af 1204{
1247c5f7 1205 if (loglevel & CPU_LOG_INT) {
e19e89a5
FB
1206 if ((env->cr[0] & CR0_PE_MASK)) {
1207 static int count;
14ce26e7 1208 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
dc6f57fd
FB
1209 count, intno, error_code, is_int,
1210 env->hflags & HF_CPL_MASK,
1211 env->segs[R_CS].selector, EIP,
2ee73ac3 1212 (int)env->segs[R_CS].base + EIP,
8145122b
FB
1213 env->segs[R_SS].selector, ESP);
1214 if (intno == 0x0e) {
14ce26e7 1215 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
8145122b 1216 } else {
14ce26e7 1217 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
8145122b 1218 }
e19e89a5 1219 fprintf(logfile, "\n");
06c2f506 1220 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1247c5f7 1221#if 0
e19e89a5
FB
1222 {
1223 int i;
1224 uint8_t *ptr;
1225 fprintf(logfile, " code=");
1226 ptr = env->segs[R_CS].base + env->eip;
1227 for(i = 0; i < 16; i++) {
1228 fprintf(logfile, " %02x", ldub(ptr + i));
dc6f57fd 1229 }
e19e89a5 1230 fprintf(logfile, "\n");
dc6f57fd 1231 }
8e682019 1232#endif
e19e89a5 1233 count++;
4136f33c 1234 }
4136f33c 1235 }
2c0262af 1236 if (env->cr[0] & CR0_PE_MASK) {
14ce26e7
FB
1237#if TARGET_X86_64
1238 if (env->hflags & HF_LMA_MASK) {
1239 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1240 } else
1241#endif
1242 {
1243 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1244 }
2c0262af
FB
1245 } else {
1246 do_interrupt_real(intno, is_int, error_code, next_eip);
1247 }
1248}
1249
678dde13
TS
1250/*
1251 * Check nested exceptions and change to double or triple fault if
1252 * needed. It should only be called, if this is not an interrupt.
1253 * Returns the new exception number.
1254 */
9596ebb7 1255static int check_exception(int intno, int *error_code)
678dde13 1256{
75d28b05 1257 int first_contributory = env->old_exception == 0 ||
678dde13
TS
1258 (env->old_exception >= 10 &&
1259 env->old_exception <= 13);
75d28b05 1260 int second_contributory = intno == 0 ||
678dde13
TS
1261 (intno >= 10 && intno <= 13);
1262
1263 if (loglevel & CPU_LOG_INT)
75d28b05 1264 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
678dde13
TS
1265 env->old_exception, intno);
1266
1267 if (env->old_exception == EXCP08_DBLE)
1268 cpu_abort(env, "triple fault");
1269
1270 if ((first_contributory && second_contributory)
1271 || (env->old_exception == EXCP0E_PAGE &&
1272 (second_contributory || (intno == EXCP0E_PAGE)))) {
1273 intno = EXCP08_DBLE;
1274 *error_code = 0;
1275 }
1276
1277 if (second_contributory || (intno == EXCP0E_PAGE) ||
1278 (intno == EXCP08_DBLE))
1279 env->old_exception = intno;
1280
1281 return intno;
1282}
1283
2c0262af
FB
1284/*
1285 * Signal an interruption. It is executed in the main CPU loop.
1286 * is_int is TRUE if coming from the int instruction. next_eip is the
1287 * EIP value AFTER the interrupt instruction. It is only relevant if
3b46e624 1288 * is_int is TRUE.
2c0262af 1289 */
/* Signal an interruption; executed in the main CPU loop. is_int is TRUE
   if coming from the INT instruction; next_eip_addend is added to the
   current EIP to form the address after that instruction. Never returns. */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        /* exceptions (not INTn): give SVM a chance to intercept, then
           apply double/triple fault promotion */
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
1304
0d1a29f9
FB
/* same as raise_exception_err, but do not restore global registers */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    /* still apply double/triple fault promotion */
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    /* jump straight back to the CPU loop without the usual unwinding */
    longjmp(env->jmp_env, 1);
}
1316
2c0262af 1317/* shortcuts to generate exceptions */
8145122b
FB
1318
/* Raise an exception carrying an error code. The parenthesized name
   defeats the debug macro of the same name near the top of the file. */
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1323
/* Raise an exception with no error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1328
3b21e03e
FB
1329/* SMM support */
1330
5fafdf24 1331#if defined(CONFIG_USER_ONLY)
74ce674f
FB
1332
/* SMM does not exist in user-mode emulation: nothing to do. */
void do_smm_enter(void)
{
}
1336
/* RSM is a no-op in user-mode emulation (no SMM). */
void helper_rsm(void)
{
}
1340
1341#else
1342
3b21e03e
FB
1343#ifdef TARGET_X86_64
1344#define SMM_REVISION_ID 0x00020064
1345#else
1346#define SMM_REVISION_ID 0x00020000
1347#endif
1348
/* Enter System Management Mode: save the CPU state to the SMRAM state
   save area at smbase+0x8000 (layout differs between the 32-bit and
   64-bit save maps), then reset the CPU into the flat real-mode-like
   SMM environment with CS base = smbase and EIP = 0x8000. */
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit (AMD-style) state save map */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    /* R8..R15 follow at descending addresses */
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* 32-bit (Intel-style) state save map */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        /* ES/CS/SS and DS/FS/GS live in two separate groups */
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1484
/* RSM: leave System Management Mode by restoring the CPU state saved by
   do_smm_enter() from the SMRAM state save area. If the saved revision
   ID advertises SMBASE relocation (bit 17), smbase is reloaded too. */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    /* 64-bit state save map */
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        /* SMBASE relocation supported */
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    /* 32-bit state save map */
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1608
74ce674f
FB
1609#endif /* !CONFIG_USER_ONLY */
1610
1611
2c0262af
FB
1612#ifdef BUGGY_GCC_DIV64
1613/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1614 call it from another function */
/* 64/32 unsigned division, kept out of line to work around a gcc 2.95.4
   PowerPC code generation problem with __udivdi3. Stores the quotient
   through q_ptr and returns the remainder. */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    uint64_t quotient = num / den;
    uint32_t remainder = num % den;

    *q_ptr = quotient;
    return remainder;
}
1620
/* 64/32 signed division (BUGGY_GCC_DIV64 workaround, see div32).
   Stores the quotient through q_ptr and returns the remainder;
   C truncating division semantics. */
int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    int64_t quotient = num / den;
    int32_t remainder = num % den;

    *q_ptr = quotient;
    return remainder;
}
1626#endif
1627
b5b38f61
FB
1628/* division, flags are undefined */
1629
1630void helper_divb_AL(target_ulong t0)
1631{
1632 unsigned int num, den, q, r;
1633
1634 num = (EAX & 0xffff);
1635 den = (t0 & 0xff);
1636 if (den == 0) {
1637 raise_exception(EXCP00_DIVZ);
1638 }
1639 q = (num / den);
1640 if (q > 0xff)
1641 raise_exception(EXCP00_DIVZ);
1642 q &= 0xff;
1643 r = (num % den) & 0xff;
1644 EAX = (EAX & ~0xffff) | (r << 8) | q;
1645}
1646
1647void helper_idivb_AL(target_ulong t0)
1648{
1649 int num, den, q, r;
1650
1651 num = (int16_t)EAX;
1652 den = (int8_t)t0;
1653 if (den == 0) {
1654 raise_exception(EXCP00_DIVZ);
1655 }
1656 q = (num / den);
1657 if (q != (int8_t)q)
1658 raise_exception(EXCP00_DIVZ);
1659 q &= 0xff;
1660 r = (num % den) & 0xff;
1661 EAX = (EAX & ~0xffff) | (r << 8) | q;
1662}
1663
1664void helper_divw_AX(target_ulong t0)
1665{
1666 unsigned int num, den, q, r;
1667
1668 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1669 den = (t0 & 0xffff);
1670 if (den == 0) {
1671 raise_exception(EXCP00_DIVZ);
1672 }
1673 q = (num / den);
1674 if (q > 0xffff)
1675 raise_exception(EXCP00_DIVZ);
1676 q &= 0xffff;
1677 r = (num % den) & 0xffff;
1678 EAX = (EAX & ~0xffff) | q;
1679 EDX = (EDX & ~0xffff) | r;
1680}
1681
1682void helper_idivw_AX(target_ulong t0)
1683{
1684 int num, den, q, r;
1685
1686 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1687 den = (int16_t)t0;
1688 if (den == 0) {
1689 raise_exception(EXCP00_DIVZ);
1690 }
1691 q = (num / den);
1692 if (q != (int16_t)q)
1693 raise_exception(EXCP00_DIVZ);
1694 q &= 0xffff;
1695 r = (num % den) & 0xffff;
1696 EAX = (EAX & ~0xffff) | q;
1697 EDX = (EDX & ~0xffff) | r;
1698}
1699
1700void helper_divl_EAX(target_ulong t0)
2c0262af 1701{
45bbbb46
FB
1702 unsigned int den, r;
1703 uint64_t num, q;
3b46e624 1704
31313213 1705 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
57fec1fe 1706 den = t0;
2c0262af 1707 if (den == 0) {
2c0262af
FB
1708 raise_exception(EXCP00_DIVZ);
1709 }
1710#ifdef BUGGY_GCC_DIV64
14ce26e7 1711 r = div32(&q, num, den);
2c0262af
FB
1712#else
1713 q = (num / den);
1714 r = (num % den);
1715#endif
45bbbb46
FB
1716 if (q > 0xffffffff)
1717 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
1718 EAX = (uint32_t)q;
1719 EDX = (uint32_t)r;
2c0262af
FB
1720}
1721
b5b38f61 1722void helper_idivl_EAX(target_ulong t0)
2c0262af 1723{
45bbbb46
FB
1724 int den, r;
1725 int64_t num, q;
3b46e624 1726
31313213 1727 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
57fec1fe 1728 den = t0;
2c0262af 1729 if (den == 0) {
2c0262af
FB
1730 raise_exception(EXCP00_DIVZ);
1731 }
1732#ifdef BUGGY_GCC_DIV64
14ce26e7 1733 r = idiv32(&q, num, den);
2c0262af
FB
1734#else
1735 q = (num / den);
1736 r = (num % den);
1737#endif
45bbbb46
FB
1738 if (q != (int32_t)q)
1739 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
1740 EAX = (uint32_t)q;
1741 EDX = (uint32_t)r;
2c0262af
FB
1742}
1743
b5b38f61
FB
1744/* bcd */
1745
1746/* XXX: exception */
1747void helper_aam(int base)
1748{
1749 int al, ah;
1750 al = EAX & 0xff;
1751 ah = al / base;
1752 al = al % base;
1753 EAX = (EAX & ~0xffff) | al | (ah << 8);
1754 CC_DST = al;
1755}
1756
1757void helper_aad(int base)
1758{
1759 int al, ah;
1760 al = EAX & 0xff;
1761 ah = (EAX >> 8) & 0xff;
1762 al = ((ah * base) + al) & 0xff;
1763 EAX = (EAX & ~0xffff) | al;
1764 CC_DST = al;
1765}
1766
1767void helper_aaa(void)
1768{
1769 int icarry;
1770 int al, ah, af;
1771 int eflags;
1772
1773 eflags = cc_table[CC_OP].compute_all();
1774 af = eflags & CC_A;
1775 al = EAX & 0xff;
1776 ah = (EAX >> 8) & 0xff;
1777
1778 icarry = (al > 0xf9);
1779 if (((al & 0x0f) > 9 ) || af) {
1780 al = (al + 6) & 0x0f;
1781 ah = (ah + 1 + icarry) & 0xff;
1782 eflags |= CC_C | CC_A;
1783 } else {
1784 eflags &= ~(CC_C | CC_A);
1785 al &= 0x0f;
1786 }
1787 EAX = (EAX & ~0xffff) | al | (ah << 8);
1788 CC_SRC = eflags;
1789 FORCE_RET();
1790}
1791
1792void helper_aas(void)
1793{
1794 int icarry;
1795 int al, ah, af;
1796 int eflags;
1797
1798 eflags = cc_table[CC_OP].compute_all();
1799 af = eflags & CC_A;
1800 al = EAX & 0xff;
1801 ah = (EAX >> 8) & 0xff;
1802
1803 icarry = (al < 6);
1804 if (((al & 0x0f) > 9 ) || af) {
1805 al = (al - 6) & 0x0f;
1806 ah = (ah - 1 - icarry) & 0xff;
1807 eflags |= CC_C | CC_A;
1808 } else {
1809 eflags &= ~(CC_C | CC_A);
1810 al &= 0x0f;
1811 }
1812 EAX = (EAX & ~0xffff) | al | (ah << 8);
1813 CC_SRC = eflags;
1814 FORCE_RET();
1815}
1816
1817void helper_daa(void)
1818{
1819 int al, af, cf;
1820 int eflags;
1821
1822 eflags = cc_table[CC_OP].compute_all();
1823 cf = eflags & CC_C;
1824 af = eflags & CC_A;
1825 al = EAX & 0xff;
1826
1827 eflags = 0;
1828 if (((al & 0x0f) > 9 ) || af) {
1829 al = (al + 6) & 0xff;
1830 eflags |= CC_A;
1831 }
1832 if ((al > 0x9f) || cf) {
1833 al = (al + 0x60) & 0xff;
1834 eflags |= CC_C;
1835 }
1836 EAX = (EAX & ~0xff) | al;
1837 /* well, speed is not an issue here, so we compute the flags by hand */
1838 eflags |= (al == 0) << 6; /* zf */
1839 eflags |= parity_table[al]; /* pf */
1840 eflags |= (al & 0x80); /* sf */
1841 CC_SRC = eflags;
1842 FORCE_RET();
1843}
1844
1845void helper_das(void)
1846{
1847 int al, al1, af, cf;
1848 int eflags;
1849
1850 eflags = cc_table[CC_OP].compute_all();
1851 cf = eflags & CC_C;
1852 af = eflags & CC_A;
1853 al = EAX & 0xff;
1854
1855 eflags = 0;
1856 al1 = al;
1857 if (((al & 0x0f) > 9 ) || af) {
1858 eflags |= CC_A;
1859 if (al < 6 || cf)
1860 eflags |= CC_C;
1861 al = (al - 6) & 0xff;
1862 }
1863 if ((al1 > 0x99) || cf) {
1864 al = (al - 0x60) & 0xff;
1865 eflags |= CC_C;
1866 }
1867 EAX = (EAX & ~0xff) | al;
1868 /* well, speed is not an issue here, so we compute the flags by hand */
1869 eflags |= (al == 0) << 6; /* zf */
1870 eflags |= parity_table[al]; /* pf */
1871 eflags |= (al & 0x80); /* sf */
1872 CC_SRC = eflags;
1873 FORCE_RET();
1874}
1875
b8b6a50b 1876void helper_cmpxchg8b(target_ulong a0)
2c0262af
FB
1877{
1878 uint64_t d;
1879 int eflags;
1880
1881 eflags = cc_table[CC_OP].compute_all();
b8b6a50b 1882 d = ldq(a0);
2c0262af 1883 if (d == (((uint64_t)EDX << 32) | EAX)) {
b8b6a50b 1884 stq(a0, ((uint64_t)ECX << 32) | EBX);
2c0262af
FB
1885 eflags |= CC_Z;
1886 } else {
b8b6a50b
FB
1887 EDX = (uint32_t)(d >> 32);
1888 EAX = (uint32_t)d;
2c0262af
FB
1889 eflags &= ~CC_Z;
1890 }
1891 CC_SRC = eflags;
1892}
1893
/* Deliver a single-step debug trap: set the single-step bit in DR6
   (bit 14, BS) and raise #DB. */
void helper_single_step(void)
{
    env->dr[6] |= 0x4000; /* DR6.BS */
    raise_exception(EXCP01_SSTP);
}
1899
2c0262af
FB
/* CPUID: fill EAX/EBX/ECX/EDX for the leaf selected by EAX, using the
   feature words configured on the CPU state. Out-of-range leaves are
   clamped to the highest supported basic leaf. */
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        /* extended leaf out of range falls back to the basic leaf limit
           (NOTE(review): clamping to cpuid_level, not cpuid_xlevel,
           looks intentional — matches hardware fallback behavior) */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0: /* vendor string + max basic leaf */
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1: /* family/model/stepping + feature flags */
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000: /* max extended leaf + vendor string */
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001: /* extended feature flags */
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002: /* processor brand string, 3 leaves of 16 bytes */
    case 0x80000003:
    case 0x80000004:
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
#if defined(TARGET_X86_64)
# if defined(USE_KQEMU)
        EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
# else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
        EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
# endif
#else
# if defined(USE_KQEMU)
        EAX = 0x00000020; /* 32 bits physical */
# else
        EAX = 0x00000024; /* 36 bits physical */
# endif
#endif
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A: /* SVM revision / feature leaf */
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
2004
/* Build the stack frame for a nested ENTER instruction: copy the
   'level - 1' saved frame pointers from the old frame onto the new
   stack, then push t1 (the frame pointer value for the new frame).
   data32 selects 32-bit vs 16-bit operand size; every stack access is
   masked with the SS stack-pointer mask.
   NOTE(review): a level of 0 would underflow the while (--level)
   loop; presumably the translator only calls this with level >= 1 —
   confirm against the caller. */
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            /* copy one saved frame pointer from the old frame */
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
2036
#ifdef TARGET_X86_64
/* 64-bit variant of helper_enter_level for ENTER in long mode.
   data64 selects 64-bit vs 16-bit operand size.  Unlike the legacy
   version, stack accesses use flat addresses with no SS base/mask.
   NOTE(review): same level == 0 underflow concern as
   helper_enter_level — confirm the caller masks it. */
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            /* copy one saved frame pointer from the old frame */
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
2067
/* LLDT: load the Local Descriptor Table register from a GDT selector.
   A null selector (bits 15..2 zero) marks the LDT invalid; otherwise
   the descriptor is fetched from the GDT, validated (must be an LDT
   descriptor, type 2, and present) and cached in env->ldt.  In long
   mode the descriptor is 16 bytes and carries a 64-bit base.
   Raises #GP/#NP with the faulting selector as error code. */
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* TI bit set (LDT-relative selector) is not allowed for LLDT */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* in long mode system descriptors are 16 bytes */
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            /* third dword holds base bits 63..32 */
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
2114
/* LTR: load the Task Register from a GDT selector.  A null selector
   marks TR invalid.  Otherwise the descriptor must be an available
   TSS (type 1 = 286, type 9 = 386) and present; it is cached in
   env->tr and the descriptor's busy bit is set in memory.  In long
   mode the 16-byte descriptor's upper half supplies base bits 63..32
   and its type field must be zero.  Raises #GP/#NP with the faulting
   selector as error code. */
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* the TSS descriptor must live in the GDT */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* in long mode system descriptors are 16 bytes */
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an *available* (not busy) 286 or 386 TSS */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            /* upper half of a 16-byte descriptor must have type 0 */
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the in-memory descriptor */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
2169
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data/stack segment register (MOV seg / POP seg semantics):
   validate the selector against the GDT/LDT, perform the SS-specific
   or data-specific privilege checks, set the descriptor's accessed
   bit, and install the descriptor into the segment cache.
   Raises #GP, #SS (stack fault) or #NP with the faulting selector. */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        /* a null SS is a #GP, except in 64-bit code at CPL != 3 */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* TI bit selects LDT vs GDT */
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* system descriptors cannot be loaded into data segments */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            /* SS requires rpl == dpl == cpl */
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* non-present SS is a stack fault, not #NP */
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
2248
/* protected mode jump */
/* Far JMP in protected mode.  Three destination kinds are handled:
   a plain code segment (conforming or non-conforming, with the usual
   DPL/RPL checks), a TSS or task gate (delegates to switch_tss), and
   a 286/386 call gate (re-reads the gate's target CS and jumps there).
   next_eip_addend is the length of the JMP instruction, used to form
   the return EIP recorded on a task switch. */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        /* limit check skipped for 64-bit code segments */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* CPL is preserved: RPL bits of the new CS are forced to cpl */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            /* switch_tss loaded new eflags; resync lazy flag state */
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            /* extract target CS:EIP from the gate descriptor */
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
2338
2339/* real mode call */
b8b6a50b
FB
2340void helper_lcall_real(int new_cs, target_ulong new_eip1,
2341 int shift, int next_eip)
2c0262af 2342{
b8b6a50b 2343 int new_eip;
2c0262af 2344 uint32_t esp, esp_mask;
14ce26e7 2345 target_ulong ssp;
2c0262af 2346
b8b6a50b 2347 new_eip = new_eip1;
2c0262af 2348 esp = ESP;
891b38e4 2349 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af
FB
2350 ssp = env->segs[R_SS].base;
2351 if (shift) {
891b38e4
FB
2352 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2353 PUSHL(ssp, esp, esp_mask, next_eip);
2c0262af 2354 } else {
891b38e4
FB
2355 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2356 PUSHW(ssp, esp, esp_mask, next_eip);
2c0262af
FB
2357 }
2358
8d7b0fbb 2359 SET_ESP(esp, esp_mask);
2c0262af
FB
2360 env->eip = new_eip;
2361 env->segs[R_CS].selector = new_cs;
14ce26e7 2362 env->segs[R_CS].base = (new_cs << 4);
2c0262af
FB
2363}
2364
/* protected mode call */
/* Far CALL in protected mode.  Handles four destination kinds:
   - a 64-bit code segment (shift == 2): pushes return CS:RIP on the
     flat stack;
   - a legacy code segment: pushes return CS:EIP with the SS mask;
   - a TSS/task gate: delegates to switch_tss and returns;
   - a 286/386 call gate: may switch to an inner-privilege stack taken
     from the TSS, copying param_count parameters from the old stack.
   Exceptions (#GP/#NP/#TS) are raised *before* any state is committed;
   the "not restartable" comments mark the commit points, so statement
   order here is significant. */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, (uint32_t)new_eip, shift);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push return CS:EIP on the current stack */
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            /* CPL preserved: RPL bits of the new CS forced to cpl */
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            /* switch_tss loaded new eflags; resync lazy flag state */
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* gate type bit 3 distinguishes 16-bit (0) from 32-bit (1) */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        /* target CS:offset and parameter count come from the gate */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            /* fetch the new SS:ESP for privilege level dpl from the TSS */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            /* push old SS:ESP, then copy the parameters from the old
               stack onto the new inner stack */
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        /* push return CS:EIP (on the new stack if one was switched) */
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2583
/* real and vm86 mode iret */
/* IRET in real or vm86 mode: pop EIP, CS and EFLAGS from the stack
   (32- or 16-bit according to 'shift'), reload CS real-mode style,
   and update eflags through a mode-dependent mask (vm86 cannot change
   IOPL).  Also clears the NMI-blocked flag. */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    /* write back only the masked low bits of ESP */
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    /* in vm86 mode IOPL is not writable by IRET */
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    env->hflags &= ~HF_NMI_MASK;
}
2618
8e682019
FB
2619static inline void validate_seg(int seg_reg, int cpl)
2620{
2621 int dpl;
2622 uint32_t e2;
cd072e01
FB
2623
2624 /* XXX: on x86_64, we do not want to nullify FS and GS because
2625 they may still contain a valid base. I would be interested to
2626 know how a real x86_64 CPU behaves */
5fafdf24 2627 if ((seg_reg == R_FS || seg_reg == R_GS) &&
cd072e01
FB
2628 (env->segs[seg_reg].selector & 0xfffc) == 0)
2629 return;
2630
8e682019
FB
2631 e2 = env->segs[seg_reg].flags;
2632 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2633 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2634 /* data or non conforming code segment */
2635 if (dpl < cpl) {
14ce26e7 2636 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
8e682019
FB
2637 }
2638 }
2639}
2640
/* protected mode iret */
/* Common implementation of protected-mode RET far (is_iret == 0) and
   IRET (is_iret == 1).  Pops the return CS:EIP (and EFLAGS for IRET),
   performs all descriptor and privilege checks, and handles:
   - return to the same privilege level,
   - return to an outer privilege level (pops SS:ESP too, validates
     the data segment registers, allows a NULL SS in long mode for
     CPL != 3),
   - IRET back to vm86 mode (return_to_vm86 path).
   'addend' is the immediate of RET n, added to the stack pointer.
   shift: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit operand size. */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    /* 64-bit stack accesses are flat and unmasked */
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            /* IRET with VM set in the popped eflags resumes vm86 */
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* a far return can never go to a more privileged level */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        /* pop the outer stack pointer SS:ESP as well */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
#ifdef DEBUG_PCALL
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        }
#endif
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                /* synthesize a flat writable stack segment cache */
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        /* RET n immediate applies to the outer stack too */
        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* IRET back to vm86: pop ESP, SS and all data segment selectors */
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
2843
/* Protected-mode IRET.  If NT (nested task) is set in eflags, this is
   a return from a nested task: the back-link selector is read from
   the current TSS and a task switch is performed (only outside long
   mode).  Otherwise the common helper_ret_protected path handles it.
   In both cases the NMI-blocked flag is cleared afterwards. */
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* task switches do not exist in long mode */
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        /* back link selector is the first word of the current TSS */
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags &= ~HF_NMI_MASK;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2877
/* Protected-mode RET far with an optional immediate ('addend' bytes
   released from the stack).  Thin wrapper over helper_ret_protected
   with is_iret == 0. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2888
023fe10d
FB
/* SYSENTER: fast transition to CPL 0.  Faults with #GP if the
   SYSENTER_CS MSR is zero.  Clears VM/IF/RF, then loads flat 4 GB
   CS and SS segments derived from SYSENTER_CS (SS = CS + 8) and sets
   ESP/EIP from the SYSENTER_ESP/SYSENTER_EIP MSRs. */
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
2909
/* SYSEXIT: fast return to CPL 3.  Faults with #GP if SYSENTER_CS is
   zero or the caller is not at CPL 0.  Loads flat 4 GB user CS and SS
   segments derived from SYSENTER_CS (CS = base + 16, SS = base + 24,
   both DPL/RPL 3) and returns to ESP = ECX, EIP = EDX. */
void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2938
/* MOV to a control register.  CR0/CR3/CR4 writes go through the
   cpu_x86_update_* functions (which also update derived state such as
   TLBs); CR8 additionally updates the APIC task priority.  Other CRs
   are stored as-is.  Compiled out entirely for user-mode emulation. */
void helper_movl_crN_T0(int reg, target_ulong t0)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        /* CR8 mirrors the APIC TPR */
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
#endif
}
2962
b8b6a50b
FB
2963void helper_lmsw(target_ulong t0)
2964{
2965 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2966 if already set to one. */
2967 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2968 helper_movl_crN_T0(0, t0);
2969}
2970
2971void helper_clts(void)
2972{
2973 env->cr[0] &= ~CR0_TS_MASK;
2974 env->hflags &= ~HF_TS_MASK;
2975}
2976
2977#if !defined(CONFIG_USER_ONLY)
/* Read CR8: returns the current APIC task-priority register value. */
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
2982#endif
2983
/* XXX: do more */
/* MOV DRn, reg: latch the debug-register value.  No breakpoint side
   effects are implemented yet (hence the XXX above). */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}
2989
/* INVLPG: invalidate the TLB entry covering linear address 'addr'. */
void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}
2994
2c0262af
FB
/* RDTSC: read the time-stamp counter into EDX:EAX.
   #GP if CR4.TSD restricts the instruction to ring 0 and CPL != 0. */
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    val = cpu_get_tsc(env);
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
3006
df01e0fc
AZ
/* RDPMC: read performance-monitoring counter.
   #GP if CR4.PCE restricts it to ring 0 and CPL != 0; after the SVM
   intercept check the instruction is reported as unimplemented (#UD). */
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
3018
5fafdf24 3019#if defined(CONFIG_USER_ONLY)
14ce26e7
FB
/* User-mode build: MSRs do not exist; WRMSR is a no-op. */
void helper_wrmsr(void)
{
}
3023
14ce26e7
FB
/* User-mode build: MSRs do not exist; RDMSR is a no-op. */
void helper_rdmsr(void)
{
}
3027#else
2c0262af
FB
/* WRMSR: write the 64-bit value EDX:EAX into the MSR selected by ECX.
   Unknown MSRs are silently ignored (real hardware would raise #GP —
   noted below as an XXX). */
void helper_wrmsr(void)
{
    uint64_t val;

    /* operand is split across EDX (high) and EAX (low) */
    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env, val);
        break;
    case MSR_EFER:
        {
            /* only bits backed by a CPUID feature are writable;
               the rest of EFER is preserved */
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            env->efer = (env->efer & ~update_mask) |
            (val & update_mask);
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        /* SVM host state-save area physical address */
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    default:
        /* XXX: exception ? */
        break;
    }
}
3097
/* RDMSR: read the MSR selected by ECX into EDX:EAX.
   Unknown MSRs read back as 0 (real hardware would raise #GP — XXX). */
void helper_rdmsr(void)
{
    uint64_t val;
    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
#endif
    default:
        /* XXX: exception ? */
        val = 0;
        break;
    }
    /* result is returned split across EDX (high) and EAX (low) */
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
14ce26e7 3154#endif
2c0262af 3155
/* LSL: load segment limit for 'selector'.
   On success sets ZF and returns the (possibly page-scaled) limit;
   on any check failure clears ZF and returns 0.  Note the `fail:`
   label lives inside the system-descriptor branch but is reached by
   `goto` from both branches — do not reorder. */
uint32_t helper_lsl(uint32_t selector)
{
    unsigned int limit;
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segments ignore DPL checks */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system descriptors: only TSS (avail/busy, 16/32-bit) and
           LDT types have a meaningful limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;   /* ZF <- 0 */
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;            /* ZF <- 1 */
    return limit;
}
3198
/* LAR: load access rights for 'selector'.
   On success sets ZF and returns descriptor word 2 masked to the
   access-rights bits (0x00f0ff00); on failure clears ZF and returns 0.
   As in helper_lsl, the `fail:` label is shared across branches. */
uint32_t helper_lar(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl, type;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)      /* null selector always fails */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segments ignore DPL checks */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system descriptors valid for LAR: TSS, LDT, call gates,
           task gates (wider set than LSL accepts) */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;   /* ZF <- 0 */
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;            /* ZF <- 1 */
    return e2 & 0x00f0ff00;
}
3244
/* VERR: set ZF if the segment named by 'selector' is readable at the
   current privilege level, clear it otherwise.  Only the flags are
   modified; nothing is returned.  The `fail:` label inside the data-
   segment branch is the shared failure path. */
void helper_verr(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)        /* null selector */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))             /* system segments unreadable */
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))         /* execute-only code segment */
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            /* non-conforming code: usual privilege checks apply */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;     /* ZF <- 0 */
            return;
        }
    }
    CC_SRC = eflags | CC_Z;              /* ZF <- 1 */
}
3277
/* VERW: set ZF if the segment named by 'selector' is writable at the
   current privilege level, clear it otherwise.  Code segments are
   never writable.  Shared `fail:` label as in helper_verr. */
void helper_verw(uint32_t selector)
{
    uint32_t e1, e2, eflags;
    int rpl, dpl, cpl;

    selector &= 0xffff;
    eflags = cc_table[CC_OP].compute_all();
    if ((selector & 0xfffc) == 0)        /* null selector */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))             /* system segments unwritable */
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;                       /* code segments: never writable */
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;     /* ZF <- 0 */
            return;
        }
    }
    CC_SRC = eflags | CC_Z;              /* ZF <- 1 */
}
3307
19e6c4b8 3308/* x87 FPU helpers */
2c0262af 3309
/* Record FPU exception bits in the status word; if any recorded
   exception is unmasked in the control word, also raise the summary
   and busy flags so a later fwait/ESC reports it. */
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}
3316
/* Divide a by b, flagging a zero-divide exception (the division itself
   still proceeds, yielding the IEEE infinity/NaN result). */
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}
3323
/* Deliver a pending FPU exception: native #MF if CR0.NE is set,
   otherwise (system emulation only) the legacy FERR# external line. */
void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3335
19e6c4b8
FB
3336void helper_flds_FT0(uint32_t val)
3337{
3338 union {
3339 float32 f;
3340 uint32_t i;
3341 } u;
3342 u.i = val;
3343 FT0 = float32_to_floatx(u.f, &env->fp_status);
3344}
3345
3346void helper_fldl_FT0(uint64_t val)
3347{
3348 union {
3349 float64 f;
3350 uint64_t i;
3351 } u;
3352 u.i = val;
3353 FT0 = float64_to_floatx(u.f, &env->fp_status);
3354}
3355
/* Load FT0 from a 32-bit signed integer. */
void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

/* Push a 32-bit single onto the FP stack (decrement fpstt, mark the
   new top valid). */
void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Push a 64-bit double onto the FP stack. */
void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Push a 32-bit signed integer onto the FP stack. */
void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Push a 64-bit signed integer onto the FP stack. */
void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Store ST0 as a raw 32-bit single-precision bit pattern. */
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

/* Store ST0 as a raw 64-bit double-precision bit pattern. */
uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

/* FIST (16-bit): convert ST0 using the current rounding mode; values
   that do not fit in int16 become the "integer indefinite" -32768. */
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

/* FIST (32-bit): convert ST0 using the current rounding mode. */
int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

/* FIST (64-bit): convert ST0 using the current rounding mode. */
int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

/* FISTT (16-bit): truncating convert; out-of-range -> -32768. */
int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

/* FISTT (32-bit): truncating convert. */
int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

/* FISTT (64-bit): truncating convert. */
int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

/* Push an 80-bit extended real loaded from guest memory at 'ptr'. */
void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Store ST0 as an 80-bit extended real to guest memory at 'ptr'. */
void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
3486
/* Push an (uninitialized) slot onto the FP register stack. */
void helper_fpush(void)
{
    fpush();
}

/* Pop the top of the FP register stack. */
void helper_fpop(void)
{
    fpop();
}

/* FDECSTP: rotate the top-of-stack pointer down; condition codes are
   cleared (C1 must be cleared; C0/C2/C3 are undefined on hardware). */
void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

/* FINCSTP: rotate the top-of-stack pointer up; see helper_fdecstp. */
void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}
3508
/* FPU move */

/* FFREE: mark ST(st_index) as empty in the tag word. */
void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

/* Copy the temporary FT0 into ST0. */
void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

/* Copy ST(st_index) into the temporary FT0. */
void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

/* Copy ST(st_index) into ST0. */
void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

/* Copy ST0 into ST(st_index). */
void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

/* FXCH: exchange ST0 with ST(st_index). */
void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}
3543
/* FPU operations */

/* Status-word condition codes for the floatx_compare result
   (less / equal / greater / unordered), indexed by result+1:
   C0=0x100, C3=0x4000, C3|C2|C0=0x4500 for unordered. */
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

/* FCOM: compare ST0 with FT0, signalling on NaNs; result goes into
   the C0/C2/C3 condition codes of the status word. */
void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}

/* FUCOM: as FCOM but quiet (only signalling NaNs raise). */
void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
    FORCE_RET();
}

/* EFLAGS patterns for FCOMI-family results, indexed by result+1:
   CF=less, ZF=equal, none=greater, ZF|PF|CF=unordered. */
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

/* FCOMI: compare ST0 with FT0 and set EFLAGS (ZF/PF/CF) directly. */
void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}

/* FUCOMI: quiet variant of FCOMI. */
void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = cc_table[CC_OP].compute_all();
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
    FORCE_RET();
}
3591
/* ST0 <- ST0 + FT0 */
void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}

/* ST0 <- ST0 * FT0 */
void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}

/* ST0 <- ST0 - FT0 */
void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}

/* ST0 <- FT0 - ST0 (reversed subtract) */
void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}

/* ST0 <- ST0 / FT0, flagging divide-by-zero */
void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

/* ST0 <- FT0 / ST0 (reversed divide), flagging divide-by-zero */
void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
3621
/* fp operations between STN and ST0 */

/* ST(i) <- ST(i) + ST0 */
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}

/* ST(i) <- ST(i) * ST0 */
void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}

/* ST(i) <- ST(i) - ST0 */
void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}

/* ST(i) <- ST0 - ST(i) (reversed subtract) */
void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}

/* ST(i) <- ST(i) / ST0, flagging divide-by-zero */
void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

/* ST(i) <- ST0 / ST(i) (reversed divide), flagging divide-by-zero */
void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
3659
/* misc FPU operations */

/* FCHS: negate ST0 (sign-bit flip, exact even for NaNs). */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

/* FABS: clear the sign of ST0. */
void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

/* Constant loads from the f15rk table:
   [0]=0, [1]=1, [2]=pi, [3]=log10(2), [4]=ln(2), [5]=log2(e), [6]=log2(10)
   (indices as used below — layout assumed from usage; confirm at the
   f15rk definition, which is outside this chunk). */
void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

/* FNSTSW: status word with the live top-of-stack field merged in. */
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

/* FNSTCW: read the control word. */
uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
3720
b5b38f61
FB
/* Propagate the guest FPU control word into the softfloat status:
   rounding mode from the RC field, and (for 80-bit builds) the
   precision-control field mapped to 32/64/80-bit rounding precision. */
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    /* precision control: 0=single, 2=double, 3=extended (1 reserved) */
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
3758
19e6c4b8
FB
/* FLDCW: load the control word and resync the softfloat status. */
void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

/* FNCLEX: clear the exception, summary and busy bits (keep top/cc). */
void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

/* FWAIT: deliver any pending FPU exception. */
void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}
3776
3777void helper_fninit(void)
3778{
3779 env->fpus = 0;
3780 env->fpstt = 0;
3781 env->fpuc = 0x37f;
3782 env->fptags[0] = 1;
3783 env->fptags[1] = 1;
3784 env->fptags[2] = 1;
3785 env->fptags[3] = 1;
3786 env->fptags[4] = 1;
3787 env->fptags[5] = 1;
3788 env->fptags[6] = 1;
3789 env->fptags[7] = 1;
3790}
3791
2c0262af
FB
3792/* BCD ops */
3793
/* FBLD: load an 18-digit packed-BCD integer (10 bytes at 'ptr',
   little-endian, sign in bit 7 of byte 9) and push it onto the stack. */
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    /* accumulate two BCD digits per byte, most-significant byte first */
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)   /* sign byte */
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}
3812
/* FBSTP (store part): round ST0 to integer and write it as an 18-digit
   packed-BCD value at 'ptr' (sign byte at offset 9, zero-padded). */
void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);     /* sign byte */
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit two decimal digits per byte, least significant first */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* pad remaining bytes with BCD zero */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
3840
3841void helper_f2xm1(void)
3842{
3843 ST0 = pow(2.0,ST0) - 1.0;
3844}
3845
/* FYL2X: ST1 <- ST1 * log2(ST0), then pop (result lands in new ST0).
   Non-positive ST0 leaves the stack alone and flags C2 as the invalid
   indicator (exception delivery itself is not modelled here). */
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
3860
/* FPTAN: ST0 <- tan(ST0), then push 1.0.  Arguments outside the
   reducible range set C2 (operand too large) and leave the stack. */
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;             /* C2: argument not reduced */
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}
3876
/* FPATAN: ST1 <- atan2(ST1, ST0), then pop. */
void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}
3886
/* FXTRACT: split ST0 into exponent and significand — ST0 becomes the
   unbiased exponent, and the significand (rebiased to [1,2)) is pushed
   on top.  Zero/denormal inputs are not special-cased here. */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);         /* force exponent field back to bias */
    ST0 = temp.d;
}
3900
/* FPREM1: IEEE remainder of ST0 by ST1 (round-to-nearest quotient).
   Sets C2 when the reduction is incomplete (exponent gap >= 53) and
   otherwise reports the low three quotient bits in C0/C3/C1, as
   software argument-reduction loops expect. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        /* full reduction possible in one step */
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* partial remainder: reduce the exponent gap and flag C2 so
           the caller iterates */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
3954
/* FPREM: legacy partial remainder of ST0 by ST1 (truncated quotient,
   unlike FPREM1's round-to-nearest).  C2 signals an incomplete
   reduction; C0/C3/C1 receive the low quotient bits otherwise. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* partial step: shave at most 32+ bits off the exponent gap */
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4009
4010void helper_fyl2xp1(void)
4011{
4012 CPU86_LDouble fptemp;
4013
4014 fptemp = ST0;
4015 if ((fptemp+1.0)>0.0) {
4016 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4017 ST1 *= fptemp;
4018 fpop();
5fafdf24 4019 } else {
2c0262af
FB
4020 env->fpus &= (~0x4700);
4021 env->fpus |= 0x400;
4022 }
4023}
4024
/* FSQRT: ST0 <- sqrt(ST0).  A negative argument flags C1 as the
   invalid-operation indicator; sqrt() then yields the IEEE NaN. */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}
4036
/* FSINCOS: ST0 <- sin(ST0), then push cos of the original value.
   Out-of-range arguments set C2 and leave the stack untouched. */
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;          /* C2: argument not reduced */
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

/* FRNDINT: round ST0 to an integer using the current rounding mode. */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

/* FSCALE: ST0 <- ST0 * 2^trunc(ST1). */
void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}

/* FSIN: ST0 <- sin(ST0); C2 set if the argument is out of range. */
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

/* FCOS: ST0 <- cos(ST0); C2 set if the argument is out of range. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg5 < 2**63 only */
    }
}
4090
/* FXAM: classify ST0 into the C0..C3 condition codes —
   NaN, infinity, zero, denormal, or normal — with C1 holding the
   sign.  (Empty-register classification via fptags is still TODO.) */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN, distinguished by the
           mantissa (80-bit format has an explicit integer bit) */
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;     /* normal finite number */
    }
}
4122
/* FSTENV: store the FPU environment (control/status/tag words plus
   zeroed instruction/operand pointers) in 32-bit (28-byte) or 16-bit
   (14-byte) protected-mode layout.  The two-bit tag for each register
   is recomputed from the actual register contents. */
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;                 /* empty */
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
            /* else: valid -> tag 0 */
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
4172
/* FLDENV: load the FPU environment written by FSTENV.  Only the
   control/status/tag words are consumed; each register's tag is
   collapsed to the internal empty/valid flag. */
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        /* tag 3 == empty; everything else counts as occupied */
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
4194
/* FSAVE: store the full FPU state (environment + all eight registers
   as 80-bit values), then reinitialize the FPU as FNINIT would. */
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    /* register image follows the 14- or 28-byte environment */
    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
4222
/* FRSTOR: reload the FPU state written by FSAVE — environment first,
   then the eight 80-bit registers. */
void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);      /* skip the environment image */

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
4237
14ce26e7
FB
/* FXSAVE: store FPU + SSE state in the 512-byte FXSAVE layout.
   The tag word is stored abbreviated (one bit per register, 1=valid,
   hence the ^ 0xff since fptags uses 1=empty).  XMM state is written
   only when CR4.OSFXSR is enabled; data64 doubles the XMM count. */
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);   /* abbreviated tag: 1 = valid */

    /* x87 registers: 16-byte slots starting at offset 0x20 */
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}
4273
/* FXRSTOR: reload FPU + SSE state from the FXSAVE layout; inverse of
   helper_fxsave (the abbreviated tag word is expanded back via ^0xff). */
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;                /* back to 1 = empty */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        nb_xmm_regs = 8 << data64;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}
1f1af9fd
FB
4310
4311#ifndef USE_X86LDOUBLE
4312
/* Decompose an internal FP value (here a 64-bit double, since
   USE_X86LDOUBLE is not defined) into the x87 80-bit extended format:
   *pmant gets the 64-bit mantissa with the explicit integer bit set,
   *pexp gets the 15-bit biased exponent plus the sign in bit 15.
   NOTE(review): denormals/zero/inf are not special-cased here --
   the integer bit is forced to 1 unconditionally. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}
4326
/* Rebuild an internal FP value (a 64-bit double in this build) from
   the x87 80-bit pieces: mant is the 64-bit mantissa (explicit
   integer bit included), upper is sign(bit 15) + 15-bit exponent.
   The top 52 mantissa bits are kept; the rest are truncated. */
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    /* old-ABI ARM stores doubles with the two words swapped */
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}
4345
4346#else
4347
/* With USE_X86LDOUBLE the internal format already is the 80-bit
   extended type, so the split is a plain field copy: low 64 bits are
   the mantissa, high 16 bits are sign + exponent. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}
4356
/* Inverse of cpu_get_fp80 for the native 80-bit build: reassemble
   the extended value from its mantissa and sign/exponent words. */
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
4365#endif
4366
14ce26e7
FB
4367#ifdef TARGET_X86_64
4368
4369//#define DEBUG_MULDIV
4370
/* 128-bit addition: {*phigh:*plow} += {b:a}. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t sum = *plow + a;

    /* unsigned wrap-around of the low word signals a carry */
    *phigh += b + (sum < a);
    *plow = sum;
}
4379
/* Two's-complement negation of the 128-bit value {*phigh:*plow}. */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    uint64_t lo = *plow;

    *plow = -lo;
    /* the "+1" of the two's complement carries into the high word
       only when the low word was zero */
    *phigh = ~(*phigh) + (lo == 0);
}
4386
45bbbb46
FB
/* Unsigned 128/64 -> 64 division: {*phigh:*plow} / b.
   On success the quotient is left in *plow and the remainder in
   *phigh, and 0 is returned.  Returns 1 (overflow) when the quotient
   does not fit in 64 bits; the outputs are untouched in that case.
   The caller must ensure b != 0. */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t lo = *plow;
    uint64_t hi = *phigh;
    int bit;

    if (hi == 0) {
        /* fast path: a plain 64-bit division suffices */
        *plow = lo / b;
        *phigh = lo % b;
        return 0;
    }
    /* quotient would need more than 64 bits */
    if (hi >= b)
        return 1;
    /* XXX: use a better algorithm */
    /* classic restoring shift-and-subtract, one quotient bit per
       iteration; the quotient accumulates into lo while hi holds the
       running remainder */
    for (bit = 0; bit < 64; bit++) {
        int carry = hi >> 63;
        hi = (hi << 1) | (lo >> 63);
        if (carry || hi >= b) {
            hi -= b;
            lo = (lo << 1) | 1;
        } else {
            lo = lo << 1;
        }
    }
#if defined(DEBUG_MULDIV)
    printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
           *phigh, *plow, b, lo, hi);
#endif
    *plow = lo;
    *phigh = hi;
    return 0;
}
4424
45bbbb46
FB
/* Signed 128/64 -> 64 division built on top of div64: divide the
   magnitudes, then re-apply the signs.  Quotient goes to *plow,
   remainder to *phigh (remainder takes the dividend's sign, as the
   x86 IDIV instruction requires).  Returns 1 on overflow. */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int neg_num, neg_den;

    neg_num = ((int64_t)*phigh < 0);
    if (neg_num)
        neg128(plow, phigh);
    neg_den = (b < 0);
    if (neg_den)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (neg_num ^ neg_den) {
        /* negative quotient: magnitude up to 2^63 is representable */
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        /* positive quotient: magnitude must stay below 2^63 */
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (neg_num)
        *phigh = - *phigh;
    return 0;
}
4449
b8b6a50b 4450void helper_mulq_EAX_T0(target_ulong t0)
14ce26e7
FB
4451{
4452 uint64_t r0, r1;
4453
b8b6a50b 4454 mulu64(&r0, &r1, EAX, t0);
14ce26e7
FB
4455 EAX = r0;
4456 EDX = r1;
4457 CC_DST = r0;
4458 CC_SRC = r1;
4459}
4460
b8b6a50b 4461void helper_imulq_EAX_T0(target_ulong t0)
14ce26e7
FB
4462{
4463 uint64_t r0, r1;
4464
b8b6a50b 4465 muls64(&r0, &r1, EAX, t0);
14ce26e7
FB
4466 EAX = r0;
4467 EDX = r1;
4468 CC_DST = r0;
a8ede8ba 4469 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
14ce26e7
FB
4470}
4471
b8b6a50b 4472target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
14ce26e7
FB
4473{
4474 uint64_t r0, r1;
4475
b8b6a50b 4476 muls64(&r0, &r1, t0, t1);
14ce26e7
FB
4477 CC_DST = r0;
4478 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
b8b6a50b 4479 return r0;
14ce26e7
FB
4480}
4481
b5b38f61 4482void helper_divq_EAX(target_ulong t0)
14ce26e7
FB
4483{
4484 uint64_t r0, r1;
b5b38f61 4485 if (t0 == 0) {
14ce26e7
FB
4486 raise_exception(EXCP00_DIVZ);
4487 }
4488 r0 = EAX;
4489 r1 = EDX;
b5b38f61 4490 if (div64(&r0, &r1, t0))
45bbbb46 4491 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
4492 EAX = r0;
4493 EDX = r1;
4494}
4495
b5b38f61 4496void helper_idivq_EAX(target_ulong t0)
14ce26e7
FB
4497{
4498 uint64_t r0, r1;
b5b38f61 4499 if (t0 == 0) {
14ce26e7
FB
4500 raise_exception(EXCP00_DIVZ);
4501 }
4502 r0 = EAX;
4503 r1 = EDX;
b5b38f61 4504 if (idiv64(&r0, &r1, t0))
45bbbb46 4505 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
4506 EAX = r0;
4507 EDX = r1;
4508}
14ce26e7
FB
4509#endif
4510
3d7374c5
FB
/* HLT: mark the CPU halted and leave the execution loop with
   EXCP_HLT so the main loop can wait for the next interrupt. */
void helper_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->hflags |= HF_HALTED_MASK;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}
4518
b5b38f61 4519void helper_monitor(target_ulong ptr)
3d7374c5 4520{
d80c7d1c 4521 if ((uint32_t)ECX != 0)
3d7374c5
FB
4522 raise_exception(EXCP0D_GPF);
4523 /* XXX: store address ? */
4524}
4525
/* MWAIT: approximated as HLT on a single-CPU configuration; with
   multiple CPUs it returns immediately so a store by another CPU
   cannot leave this one sleeping forever. */
void helper_mwait(void)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        helper_hlt();
    }
}
4538
b5b38f61 4539void helper_debug(void)
664e0f19 4540{
b5b38f61
FB
4541 env->exception_index = EXCP_DEBUG;
4542 cpu_loop_exit();
664e0f19
FB
4543}
4544
b5b38f61 4545void helper_raise_interrupt(int intno, int next_eip_addend)
664e0f19 4546{
b5b38f61 4547 raise_interrupt(intno, 1, 0, next_eip_addend);
664e0f19
FB
4548}
4549
b5b38f61 4550void helper_raise_exception(int exception_index)
4d6b6c0a 4551{
b5b38f61
FB
4552 raise_exception(exception_index);
4553}
4d6b6c0a 4554
b5b38f61
FB
/* CLI: clear the interrupt-enable flag.  NOTE(review): no IOPL/CPL
   privilege check here -- presumably done at translation time;
   confirm against translate.c. */
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}
4559
/* STI: set the interrupt-enable flag.  NOTE(review): privilege
   checks and the one-insn STI shadow are handled elsewhere (see
   helper_set_inhibit_irq); confirm against translate.c. */
void helper_sti(void)
{
    env->eflags |= IF_MASK;
}
4564
4565#if 0
4566/* vm86plus instructions */
4567void helper_cli_vm(void)
4568{
4569 env->eflags &= ~VIF_MASK;
4570}
4571
4572void helper_sti_vm(void)
4573{
4574 env->eflags |= VIF_MASK;
4575 if (env->eflags & VIP_MASK) {
4576 raise_exception(EXCP0D_GPF);
7a0e1f41 4577 }
b5b38f61 4578}
4d6b6c0a 4579#endif
b5b38f61
FB
4580
/* Inhibit interrupt delivery until after the next instruction
   (the STI / MOV SS interrupt shadow). */
void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}
4585
/* End the interrupt-inhibit shadow started by helper_set_inhibit_irq. */
void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
4590
b8b6a50b 4591void helper_boundw(target_ulong a0, int v)
b5b38f61 4592{
b8b6a50b
FB
4593 int low, high;
4594 low = ldsw(a0);
4595 high = ldsw(a0 + 2);
4596 v = (int16_t)v;
b5b38f61
FB
4597 if (v < low || v > high) {
4598 raise_exception(EXCP05_BOUND);
4599 }
4600 FORCE_RET();
4601}
4602
b8b6a50b 4603void helper_boundl(target_ulong a0, int v)
b5b38f61 4604{
b8b6a50b
FB
4605 int low, high;
4606 low = ldl(a0);
4607 high = ldl(a0 + 4);
b5b38f61
FB
4608 if (v < low || v > high) {
4609 raise_exception(EXCP05_BOUND);
4610 }
4611 FORCE_RET();
4612}
4613
4614static float approx_rsqrt(float a)
4615{
4616 return 1.0 / sqrt(a);
4617}
4618
/* Software stand-in for the RCPPS/RCPSS approximation: 1/a computed
   in double precision, rounded to float. */
static float approx_rcp(float a)
{
    double inv = 1.0 / a;
    return inv;
}
664e0f19 4623
5fafdf24 4624#if !defined(CONFIG_USER_ONLY)
61382a50
FB
4625
4626#define MMUSUFFIX _mmu
273af660
TS
4627#ifdef __s390__
4628# define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
4629#else
4630# define GETPC() (__builtin_return_address(0))
4631#endif
61382a50 4632
2c0262af
FB
4633#define SHIFT 0
4634#include "softmmu_template.h"
4635
4636#define SHIFT 1
4637#include "softmmu_template.h"
4638
4639#define SHIFT 2
4640#include "softmmu_template.h"
4641
4642#define SHIFT 3
4643#include "softmmu_template.h"
4644
61382a50
FB
4645#endif
4646
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* with a return address the CPU state was just restored above,
           so the full raise path is safe; otherwise use the
           no-restore variant */
        if (retaddr)
            raise_exception_err(env->exception_index, env->error_code);
        else
            raise_exception_err_norestore(env->exception_index, env->error_code);
    }
    /* fault handled (or none): restore the caller's env pointer */
    env = saved_env;
}
0573fbfc
TS
4682
4683
4684/* Secure Virtual Machine helpers */
4685
/* STGI: set the SVM global interrupt flag. */
void helper_stgi(void)
{
    env->hflags |= HF_GIF_MASK;
}
4690
/* CLGI: clear the SVM global interrupt flag. */
void helper_clgi(void)
{
    env->hflags &= ~HF_GIF_MASK;
}
4695
4696#if defined(CONFIG_USER_ONLY)
4697
b8b6a50b
FB
/* No-op stub: SVM is not emulated in user-mode-only builds. */
void helper_vmrun(void)
{
}
/* No-op stub: SVM is not emulated in user-mode-only builds. */
void helper_vmmcall(void)
{
}
/* No-op stub: SVM is not emulated in user-mode-only builds. */
void helper_vmload(void)
{
}
/* No-op stub: SVM is not emulated in user-mode-only builds. */
void helper_vmsave(void)
{
}
/* No-op stub: SVM is not emulated in user-mode-only builds. */
void helper_skinit(void)
{
}
/* No-op stub: SVM is not emulated in user-mode-only builds. */
void helper_invlpga(void)
{
}
/* No-op stub: SVM is not emulated in user-mode-only builds. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
/* No-op stub: SVM intercepts never fire in user-mode-only builds. */
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}
4722
b8b6a50b
FB
/* No-op stub: SVM I/O intercepts never fire in user-mode-only builds. */
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
0573fbfc
TS
4727#else
4728
/* Expand the packed 12-bit VMCB segment attribute format into the
   high word of a descriptor, merging in the base/limit bits that
   share that word. */
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    uint32_t access     = (vmcb_attrib & 0x00ff) << 8;  /* Type, S, DPL, P */
    uint32_t flags      = (vmcb_attrib & 0x0f00) << 12; /* AVL, L, DB, G */
    uint32_t base_mid   = (vmcb_base >> 16) & 0xff;     /* Base 23-16 */
    uint32_t base_high  = vmcb_base & 0xff000000;       /* Base 31-24 */
    uint32_t limit_high = vmcb_limit & 0xf0000;         /* Limit 19-16 */

    return access | flags | base_mid | base_high | limit_high;
}
4738
/* Pack a descriptor's high-word attribute bits back into the 12-bit
   VMCB segment attribute format (inverse of vmcb2cpu_attrib). */
static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
{
    uint16_t access = (cpu_attrib >> 8) & 0xff;        /* Type, S, DPL, P */
    uint16_t flags  = (cpu_attrib & 0xf00000) >> 12;   /* AVL, L, DB, G */

    return access | flags;
}
4744
b5b38f61 4745void helper_vmrun(void)
0573fbfc 4746{
b5b38f61 4747 target_ulong addr;
0573fbfc
TS
4748 uint32_t event_inj;
4749 uint32_t int_ctl;
4750
b5b38f61 4751 addr = EAX;
0573fbfc
TS
4752 if (loglevel & CPU_LOG_TB_IN_ASM)
4753 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4754
4755 env->vm_vmcb = addr;
0573fbfc
TS
4756
4757 /* save the current CPU state in the hsave page */
4758 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4759 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4760
4761 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4762 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4763
4764 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4765 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4766 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4767 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4768 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
4769 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4770 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4771
4772 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4773 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4774
4775 SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
4776 SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
4777 SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
4778 SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
4779
4780 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4781 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4782 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4783
4784 /* load the interception bitmaps so we do not need to access the
4785 vmcb in svm mode */
4786 /* We shift all the intercept bits so we can OR them with the TB
4787 flags later on */
4788 env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
4789 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4790 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4791 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4792 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4793 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4794
4795 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4796 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4797
4798 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4799 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4800
4801 /* clear exit_info_2 so we behave like the real hardware */
4802 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4803
4804 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4805 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4806 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4807 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4808 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4809 if (int_ctl & V_INTR_MASKING_MASK) {
4810 env->cr[8] = int_ctl & V_TPR_MASK;
3d575329 4811 cpu_set_apic_tpr(env, env->cr[8]);
0573fbfc
TS
4812 if (env->eflags & IF_MASK)
4813 env->hflags |= HF_HIF_MASK;
4814 }
4815
4816#ifdef TARGET_X86_64
4817 env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
4818 env->hflags &= ~HF_LMA_MASK;
4819 if (env->efer & MSR_EFER_LMA)
4820 env->hflags |= HF_LMA_MASK;
4821#endif
4822 env->eflags = 0;
4823 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4824 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4825 CC_OP = CC_OP_EFLAGS;
4826 CC_DST = 0xffffffff;
4827
4828 SVM_LOAD_SEG(env->vm_vmcb, ES, es);
4829 SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
4830 SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
4831 SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
4832
4833 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4834 env->eip = EIP;
4835 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4836 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4837 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4838 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4839 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4840
4841 /* FIXME: guest state consistency checks */
4842
4843 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4844 case TLB_CONTROL_DO_NOTHING:
4845 break;
4846 case TLB_CONTROL_FLUSH_ALL_ASID:
4847 /* FIXME: this is not 100% correct but should work for now */
4848 tlb_flush(env, 1);
4849 break;
4850 }
4851
4852 helper_stgi();
4853
0573fbfc
TS
4854 /* maybe we need to inject an event */
4855 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4856 if (event_inj & SVM_EVTINJ_VALID) {
4857 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4858 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4859 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4860 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4861
4862 if (loglevel & CPU_LOG_TB_IN_ASM)
4863 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4864 /* FIXME: need to implement valid_err */
4865 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4866 case SVM_EVTINJ_TYPE_INTR:
4867 env->exception_index = vector;
4868 env->error_code = event_inj_err;
7241f532 4869 env->exception_is_int = 0;
0573fbfc
TS
4870 env->exception_next_eip = -1;
4871 if (loglevel & CPU_LOG_TB_IN_ASM)
4872 fprintf(logfile, "INTR");
4873 break;
4874 case SVM_EVTINJ_TYPE_NMI:
4875 env->exception_index = vector;
4876 env->error_code = event_inj_err;
7241f532 4877 env->exception_is_int = 0;
0573fbfc
TS
4878 env->exception_next_eip = EIP;
4879 if (loglevel & CPU_LOG_TB_IN_ASM)
4880 fprintf(logfile, "NMI");
4881 break;
4882 case SVM_EVTINJ_TYPE_EXEPT:
4883 env->exception_index = vector;
4884 env->error_code = event_inj_err;
4885 env->exception_is_int = 0;
4886 env->exception_next_eip = -1;
4887 if (loglevel & CPU_LOG_TB_IN_ASM)
4888 fprintf(logfile, "EXEPT");
4889 break;
4890 case SVM_EVTINJ_TYPE_SOFT:
4891 env->exception_index = vector;
4892 env->error_code = event_inj_err;
4893 env->exception_is_int = 1;
4894 env->exception_next_eip = EIP;
4895 if (loglevel & CPU_LOG_TB_IN_ASM)
4896 fprintf(logfile, "SOFT");
4897 break;
4898 }
4899 if (loglevel & CPU_LOG_TB_IN_ASM)
4900 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4901 }
52621688 4902 if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
0573fbfc 4903 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
52621688 4904 }
0573fbfc
TS
4905
4906 cpu_loop_exit();
4907}
4908
/* VMMCALL: not implemented, only logged. */
void helper_vmmcall(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmmcall!\n");
}
4914
b5b38f61 4915void helper_vmload(void)
0573fbfc 4916{
b5b38f61
FB
4917 target_ulong addr;
4918 addr = EAX;
0573fbfc
TS
4919 if (loglevel & CPU_LOG_TB_IN_ASM)
4920 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4921 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4922 env->segs[R_FS].base);
4923
4924 SVM_LOAD_SEG2(addr, segs[R_FS], fs);
4925 SVM_LOAD_SEG2(addr, segs[R_GS], gs);
4926 SVM_LOAD_SEG2(addr, tr, tr);
4927 SVM_LOAD_SEG2(addr, ldt, ldtr);
4928
4929#ifdef TARGET_X86_64
4930 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4931 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4932 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4933 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4934#endif
4935 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4936 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4937 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4938 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
4939}
4940
b5b38f61 4941void helper_vmsave(void)
0573fbfc 4942{
b5b38f61
FB
4943 target_ulong addr;
4944 addr = EAX;
0573fbfc
TS
4945 if (loglevel & CPU_LOG_TB_IN_ASM)
4946 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4947 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4948 env->segs[R_FS].base);
4949
4950 SVM_SAVE_SEG(addr, segs[R_FS], fs);
4951 SVM_SAVE_SEG(addr, segs[R_GS], gs);
4952 SVM_SAVE_SEG(addr, tr, tr);
4953 SVM_SAVE_SEG(addr, ldt, ldtr);
4954
4955#ifdef TARGET_X86_64
4956 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
4957 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4958 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4959 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4960#endif
4961 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4962 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4963 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
4964 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
4965}
4966
/* SKINIT: not implemented, only logged. */
void helper_skinit(void)
{
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"skinit!\n");
}
4972
/* INVLPGA: approximated by flushing the TLB rather than invalidating
   only the single guest address in EAX. */
void helper_invlpga(void)
{
    tlb_flush(env, 0);
}
4977
b8b6a50b 4978void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
0573fbfc
TS
4979{
4980 switch(type) {
4981 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4982 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
b8b6a50b 4983 helper_vmexit(type, param);
0573fbfc
TS
4984 }
4985 break;
4986 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4987 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
b8b6a50b 4988 helper_vmexit(type, param);
0573fbfc
TS
4989 }
4990 break;
4991 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4992 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
b8b6a50b 4993 helper_vmexit(type, param);
0573fbfc
TS
4994 }
4995 break;
4996 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4997 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
b8b6a50b 4998 helper_vmexit(type, param);
0573fbfc
TS
4999 }
5000 break;
5001 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
5002 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
b8b6a50b 5003 helper_vmexit(type, param);
0573fbfc
TS
5004 }
5005 break;
5006 case SVM_EXIT_IOIO:
0573fbfc
TS
5007 break;
5008
5009 case SVM_EXIT_MSR:
5010 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
5011 /* FIXME: this should be read in at vmrun (faster this way?) */
5012 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
b8b6a50b 5013 uint32_t t0, t1;
0573fbfc
TS
5014 switch((uint32_t)ECX) {
5015 case 0 ... 0x1fff:
b8b6a50b
FB
5016 t0 = (ECX * 2) % 8;
5017 t1 = ECX / 8;
0573fbfc
TS
5018 break;
5019 case 0xc0000000 ... 0xc0001fff:
b8b6a50b
FB
5020 t0 = (8192 + ECX - 0xc0000000) * 2;
5021 t1 = (t0 / 8);
5022 t0 %= 8;
0573fbfc
TS
5023 break;
5024 case 0xc0010000 ... 0xc0011fff:
b8b6a50b
FB
5025 t0 = (16384 + ECX - 0xc0010000) * 2;
5026 t1 = (t0 / 8);
5027 t0 %= 8;
0573fbfc
TS
5028 break;
5029 default:
b8b6a50b
FB
5030 helper_vmexit(type, param);
5031 t0 = 0;
5032 t1 = 0;
5033 break;
0573fbfc 5034 }
b8b6a50b
FB
5035 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5036 helper_vmexit(type, param);
0573fbfc
TS
5037 }
5038 break;
5039 default:
5040 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
b8b6a50b 5041 helper_vmexit(type, param);
0573fbfc
TS
5042 }
5043 break;
5044 }
0573fbfc
TS
5045}
5046
b8b6a50b
FB
/* Check the VMCB's I/O permission bitmap for an access to the given
   port; on an intercepted access, store the next EIP in exit_info_2
   and perform the SVM_EXIT_IOIO vmexit.  param carries the IOIO exit
   information (bits 4-6 encode the access size). */
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
5062
/* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: save the guest state back into the VMCB (including the
   interrupt shadow and exit codes), then reload the host state from
   the hsave page and re-enter the CPU loop on the host side. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* record (and clear) a pending interrupt shadow */
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* with virtual interrupt masking, write the current TPR back
       into the VMCB's V_TPR field */
    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
5192
5193#endif
5af45186
FB
5194
5195/* MMX/SSE */
5196/* XXX: optimize by storing fptt and fptags in the static cpu state */
/* Prepare for MMX execution: reset the x87 stack top and mark all
   eight tag bytes valid (0), written four bytes at a time.
   NOTE(review): the uint32_t stores type-pun the fptags byte array;
   this relies on fptags being 4-byte aligned. */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
5203
/* EMMS: mark every x87/MMX register tag as empty (1), written four
   bytes at a time via the same type-pun as helper_enter_mmx. */
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
5210
5211/* XXX: suppress */
/* Copy one 64-bit MMX/low-XMM value from *s to *d. */
void helper_movq(uint64_t *d, uint64_t *s)
{
    uint64_t value = *s;

    *d = value;
}
5216
5217#define SHIFT 0
5218#include "ops_sse.h"
5219
5220#define SHIFT 1
5221#include "ops_sse.h"
5222
b6abf97d
FB
5223#define SHIFT 0
5224#include "helper_template.h"
5225#undef SHIFT
5226
5227#define SHIFT 1
5228#include "helper_template.h"
5229#undef SHIFT
5230
5231#define SHIFT 2
5232#include "helper_template.h"
5233#undef SHIFT
5234
5235#ifdef TARGET_X86_64
5236
5237#define SHIFT 3
5238#include "helper_template.h"
5239#undef SHIFT
5240
5241#endif
07d2c595 5242
6191b059
FB
5243/* bit operations */
/* BSF: return the index of the least-significant set bit of t0.
   NOTE(review): loops forever when t0 == 0 -- the translated BSF
   presumably only calls this with a non-zero source (hardware leaves
   the destination undefined for zero); confirm against translate.c. */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}
5257
/* BSR: return the index of the most-significant set bit of t0.
   NOTE(review): loops forever when t0 == 0 -- same non-zero
   precondition as helper_bsf; confirm against translate.c. */
target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
5272
5273
07d2c595
FB
/* CC_OP_EFLAGS: the flags are already materialized in CC_SRC. */
static int compute_all_eflags(void)
{
    return CC_SRC;
}
5278
/* CC_OP_EFLAGS: extract just the carry flag from CC_SRC. */
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
5283
/* Lazy condition-code dispatch table, indexed by CC_OP: for each
   last-operation kind it provides a helper that materializes all
   EFLAGS condition bits and one that computes only CF. */
CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};
5351