]> git.proxmox.com Git - qemu.git/blame - target-i386/helper.c
update
[qemu.git] / target-i386 / helper.c
CommitLineData
2c0262af
FB
1/*
2 * i386 helpers
5fafdf24 3 *
2c0262af
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
b8b6a50b 20#define CPU_NO_GLOBAL_REGS
2c0262af 21#include "exec.h"
7a51ad82 22#include "host-utils.h"
2c0262af 23
f3f2d9be
FB
24//#define DEBUG_PCALL
25
8145122b
FB
#if 0
/* Debug wrapper for raise_exception_err(): logs the source line of each
   raise before calling the real function.  The parenthesized call
   (raise_exception_err)(a, b) suppresses recursive macro expansion.
   Disabled by default; flip the #if to enable. */
#define raise_exception_err(a, b)\
do {\
    if (logfile)\
        fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
34
2c0262af
FB
/* Parity-flag lookup table: entry i is CC_P when byte value i contains an
   even number of set bits (x86 PF is even parity of the result's low byte),
   0 otherwise. */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
69
/* modulo 17 table: entry i == i mod 17.  Reduces the rotate count for
   16-bit rotate-through-carry (RCL/RCR), which rotates through 17 bits
   (16 data bits + CF). */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
77
/* modulo 9 table: entry i == i mod 9.  Reduces the rotate count for
   8-bit rotate-through-carry (RCL/RCR), which rotates through 9 bits
   (8 data bits + CF). */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
85
/* x87 constant table: zero, one, pi, log10(2), ln(2), log2(e), log2(10).
   Presumably indexed by the FLD-constant instructions (FLDZ/FLD1/FLDPI/
   FLDLG2/FLDLN2/FLDL2E/FLDL2T) — confirm against the translator. */
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
3b46e624 96
/* broken thread support */

/* Single global lock, acquired/released by helper_lock()/helper_unlock()
   below. */
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
100
/* Acquire the global CPU lock (presumably backs the x86 LOCK prefix —
   confirm with the translator's callers). */
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}
105
/* Release the global CPU lock taken by helper_lock(). */
void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
110
bd7a7b33
FB
/* Store t0 into EFLAGS, modifying only the bits selected by update_mask. */
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
115
/* Assemble the current EFLAGS value: arithmetic flags from the lazy
   condition-code state, the direction flag (kept separately in DF),
   and all remaining bits from env->eflags except VM and RF, which are
   masked out of the stored copy. */
target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = cc_table[CC_OP].compute_all(); /* materialize lazy CC flags */
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
124
7e84c249
FB
/* return non zero if error */
/* Read the 8-byte descriptor for 'selector' from the GDT or LDT into
   *e1_ptr (low dword) and *e2_ptr (high dword).  Fails (-1) when the
   selector's byte offset runs past the table limit. */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)      /* TI bit: 1 = LDT, 0 = GDT */
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;   /* strip RPL/TI to get the byte offset */
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
3b46e624 145
7e84c249
FB
146static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
147{
148 unsigned int limit;
149 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
150 if (e2 & DESC_G_MASK)
151 limit = (limit << 12) | 0xfff;
152 return limit;
153}
154
/* Reassemble the 32-bit segment base scattered across descriptor words:
   bits 0-15 from e1[31:16], bits 16-23 from e2[7:0], bits 24-31 from
   e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;
    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
159
/* Fill a segment cache directly from descriptor words, with no
   permission or presence checks. */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
166
/* init the segment cache in vm86 mode: real-mode style, base is
   selector << 4, limit fixed at 0xffff, no descriptor flags. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
174
/* Fetch the inner-stack SS:ESP pair for privilege level 'dpl' from the
   current TSS, handling both the 16-bit and 32-bit TSS layouts.
   Aborts on an invalid TR; raises #TS if the pair lies beyond the TSS
   limit. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)          /* must be a TSS descriptor type */
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;            /* 0: 16-bit TSS, 1: 32-bit TSS */
    index = (dpl * 4 + 2) << shift;  /* offset of the ss:esp pair for dpl */
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        /* 16-bit TSS: SP then SS, 2 bytes each */
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        /* 32-bit TSS: 4-byte ESP then 2-byte SS */
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
209
7e84c249
FB
/* XXX: merge with load_seg() */
/* Load segment register 'seg_reg' with 'selector' during a task switch,
   raising #TS (or #NP for a non-present segment) when a descriptor check
   fails.  A null selector is accepted for every register except CS and
   SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))  /* must be code/data, not a system seg */
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* null selector: only legal outside CS/SS */
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
259
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by tss_selector
   (descriptor words e1/e2).  'source' is one of SWITCH_TSS_{JMP,IRET,
   CALL} and controls the busy-bit and NT-flag handling; next_eip is
   the EIP saved into the outgoing TSS. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)    /* the referenced TSS must be in the GDT */
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)    /* must be a system descriptor */
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;     /* minimum 32-bit TSS size - 1 */
    else
        tss_limit_max = 43;      /* minimum 16-bit TSS size - 1 */
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;   /* NOTE: read but not otherwise used below */
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;   /* IRET leaves the nested-task chain */

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* store the back link to the old task and mark nesting */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)   /* the LDT selector must reference the GDT */
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
7e84c249
FB
500
/* check if Port I/O is allowed in TSS */
/* Consults the I/O permission bitmap of the current (32-bit) TSS for
   'size' consecutive ports starting at 'addr'; raises #GP(0) if any
   covered bit is set or the TSS/bitmap is unusable. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);  /* I/O map base field */
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
525
/* Check I/O permission for a 1-byte access to port t0. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
530
/* Check I/O permission for a 2-byte access to port t0. */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
535
/* Check I/O permission for a 4-byte access to port t0. */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
540
/* Byte port write: truncate data to 8 bits and forward to the board. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(env, port, data & 0xff);
}
545
/* Byte port read. */
target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(env, port);
}
550
/* Word port write: truncate data to 16 bits and forward to the board. */
void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(env, port, data & 0xffff);
}
555
/* Word port read. */
target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(env, port);
}
560
/* Dword port write. */
void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(env, port, data);
}
565
/* Dword port read. */
target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(env, port);
}
570
891b38e4
FB
571static inline unsigned int get_sp_mask(unsigned int e2)
572{
573 if (e2 & DESC_B_MASK)
574 return 0xffffffff;
575 else
576 return 0xffff;
577}
578
8d7b0fbb
FB
#ifdef TARGET_X86_64
/* Write 'val' back into the stack pointer honouring the stack width:
   16-bit stacks update only SP, 32-bit stacks zero-extend into ESP,
   anything else (64-bit) stores the whole value into RSP. */
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
/* Without x86-64 only the masked bits of ESP are ever replaced. */
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif
592
891b38e4
FB
/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop primitives used by the interrupt/call helpers.
   'ssp' is the stack segment base, 'sp' a local stack-pointer copy
   updated in place, and 'sp_mask' wraps SP vs ESP (see get_sp_mask). */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
617
2c0262af
FB
/* protected mode interrupt */
/* Deliver exception/interrupt 'intno' in protected mode: walk the IDT,
   dispatch task gates to switch_tss(), otherwise validate the target
   code segment, optionally switch to an inner-privilege stack, push the
   return frame (and error code), and jump to the handler.  is_int marks
   software INTn (next_eip is then the return address); is_hw marks
   external hardware interrupts (no error code pushed). */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip, sp_mask;
    int svm_should_check = 1;

    /* SVM: a next_eip of -1 marks an event injected by the hypervisor;
       it must not be re-intercepted */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }

    if (svm_should_check
        && (INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int)) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* only these CPU exceptions carry an error code */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;   /* INTn returns after the instruction */
    else
        old_eip = env->eip;   /* faults restart at the faulting EIP */

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code on the new task's stack */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege: fetch the new SS:ESP from the TSS and
           validate it */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;   /* 0: 16-bit gate, 1: 32-bit gate */

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                /* leaving vm86: also save the data segment selectors */
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* vm86 data segments are cleared on entry to the handler */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
833
14ce26e7
FB
834#ifdef TARGET_X86_64
835
/* 64-bit stack push/pop: flat address space, no segment base or
   sp_mask needed; 'sp' is a local stack-pointer copy updated in place. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
847
/* Fetch an RSP value from the 64-bit TSS: levels 0-2 are the privilege
   stacks, levels 3+ index the IST entries (callers pass ist + 3).
   Aborts on an invalid TR; raises #TS if the slot is beyond the TSS
   limit. */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
864
/* 64 bit interrupt */
/* Deliver exception/interrupt 'intno' in long mode: read the 16-byte
   IDT gate, validate the 64-bit target code segment, pick the stack
   (IST entry or TSS privilege stack), push the always-64-bit frame and
   jump to the handler.  Semantics of is_int/is_hw/next_eip match
   do_interrupt_protected(). */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;
    int svm_should_check = 1;

    /* SVM: a next_eip of -1 marks an event injected by the hypervisor;
       it must not be re-intercepted */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* only these CPU exceptions carry an error code */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;   /* INTn returns after the instruction */
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)   /* long-mode gates are 16 bytes */
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);          /* upper 32 bits of the offset */
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;   /* Interrupt Stack Table index, 0 = none */
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    /* handler must be a 64-bit code segment (L set, B clear) */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;        /* long mode loads a null SS on privilege change */
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* long mode always pushes SS:RSP, RFLAGS, CS:RIP (all 64-bit) */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;   /* null selector with the new RPL */
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
f419b321 1002#endif
14ce26e7 1003
d2fd1af7
FB
1004#if defined(CONFIG_USER_ONLY)
/* SYSCALL in user-mode emulation: there is no kernel to enter, so just
   record a synthetic EXCP_SYSCALL (with the return EIP) and leave the
   CPU loop; the host-side syscall emulation handles it from there. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    /* EIP to resume at after the emulated syscall completes */
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
1011#else
/* SYSCALL (system emulation): fast kernel entry.
 * next_eip_addend: length of the SYSCALL insn, used to compute the
 * return address saved in ECX (RCX in long mode).
 * Raises #UD if EFER.SCE is clear.  CS/SS are loaded as flat segments
 * derived from MSR_STAR; no descriptor-table access is performed. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    /* STAR[47:32] holds the kernel CS selector (SS is CS + 8) */
    selector = (env->star >> 32) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* long mode: return RIP in RCX, old RFLAGS in R11 */
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        /* remember whether the caller was in 64-bit code before we
           reload CS (which forces HF_CS64 on) */
        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* SFMASK selects which RFLAGS bits are cleared on entry */
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        /* target RIP: LSTAR for 64-bit callers, CSTAR for compat mode */
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else
#endif
    {
        /* legacy mode: return EIP in ECX, entry point from STAR[31:0] */
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
d2fd1af7 1066#endif
14ce26e7
FB
1067
/* SYSRET: fast return from a SYSCALL kernel entry.
 * dflag == 2 selects a 64-bit return (REX.W SYSRET); otherwise the
 * return is to 32-bit code.  Raises #UD if EFER.SCE is clear and #GP
 * if not in protected mode at CPL 0.  User CS/SS come from STAR[63:48]. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    /* STAR[63:48] is the base of the user segment selectors */
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* return to 64-bit code: CS = base + 16, RIP from RCX */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* return to 32-bit compatibility code */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* restore the flags SYSCALL saved in R11 */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else
#endif
    {
        /* legacy-mode SYSRET: EIP from ECX, IF is force-enabled */
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef USE_KQEMU
    /* hand execution back to kqemu if it can run this state natively */
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
14ce26e7 1132
2c0262af
FB
/* real mode interrupt: vector through the 4-byte IVT entries, push
 * FLAGS/CS/IP (16-bit) and clear IF/TF/AC/RF.
 * is_int: nonzero for a software INT n (then next_eip is the address of
 * the following instruction); for exceptions the faulting EIP is saved. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;
    int svm_should_check = 1;

    /* SVM: next_eip == -1 marks a re-injected event for which the
       intercept check was already done by the caller */
    if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
        next_eip = EIP;
        svm_should_check = 0;
    }
    if (svm_should_check
        && INTERCEPTEDl(_exceptions, 1 << intno)
        && !is_int) {
        raise_interrupt(intno, is_int, error_code, 0);
    }
    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    /* IVT entry: 16-bit offset followed by 16-bit segment */
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state; only the low 16 bits of ESP change */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1179
/* fake user mode interrupt: used by user-mode emulation, where no real
 * interrupt delivery can happen.  Performs only the privilege check a
 * software INT would do, then leaves the exception pending for the
 * host-side loop to handle. */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    /* 16-byte IDT entries in long mode, 8-byte otherwise */
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
1210
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 * Dispatches to the 64-bit, protected-mode or real-mode delivery path
 * depending on the current CPU state; optionally logs the event first.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (loglevel & CPU_LOG_INT) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            /* sequence number so log lines can be correlated */
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            /* page faults log the faulting address, others log EAX */
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
            }
            fprintf(logfile, "\n");
            cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
1263
678dde13
TS
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    /* "contributory" exceptions per the x86 double-fault rules:
       #DE (0) and #TS/#NP/#SS/#GP (10..13) */
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    if (loglevel & CPU_LOG_INT)
        fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

    /* a fault while delivering #DF is a triple fault (shutdown) */
    if (env->old_exception == EXCP08_DBLE)
        cpu_abort(env, "triple fault");

    /* contributory-on-contributory, or a qualifying fault while
       delivering a page fault, escalates to #DF with error code 0 */
    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    /* remember only exceptions that could escalate next time */
    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
1297
2c0262af
FB
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 * Does not return: exits the current translation block via cpu_loop_exit.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     int next_eip_addend)
{
    if (!is_int) {
        /* exceptions (not INT n) may be intercepted by SVM and may
           escalate to double/triple fault */
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
1318
0d1a29f9
FB
/* same as raise_exception_err, but do not restore global registers:
   longjmps straight out of the CPU loop, bypassing cpu_loop_exit. */
static void raise_exception_err_norestore(int exception_index, int error_code)
{
    exception_index = check_exception(exception_index, &error_code);

    env->exception_index = exception_index;
    env->error_code = error_code;
    env->exception_is_int = 0;
    env->exception_next_eip = 0;
    longjmp(env->jmp_env, 1);
}
1330
/* shortcuts to generate exceptions */

/* Raise an exception that carries an error code.  The parenthesized
   name prevents expansion of the debug macro of the same name defined
   at the top of this file. */
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1337
/* Raise an exception without an error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1342
3b21e03e
FB
1343/* SMM support */
1344
5fafdf24 1345#if defined(CONFIG_USER_ONLY)
74ce674f
FB
1346
/* SMM is not modelled in user-mode emulation: entering SMM is a no-op. */
void do_smm_enter(void)
{
}
1350
/* RSM is a no-op in user-mode emulation (no SMM state to resume from). */
void helper_rsm(void)
{
}
1354
1355#else
1356
3b21e03e
FB
1357#ifdef TARGET_X86_64
1358#define SMM_REVISION_ID 0x00020064
1359#else
1360#define SMM_REVISION_ID 0x00020000
1361#endif
1362
/* Enter System Management Mode: save the CPU state to the SMRAM
 * state-save area at smbase+0x8000 (layout differs between the 64-bit
 * and 32-bit save maps — the offsets below are the architectural ones),
 * then reset the CPU into the SMM execution environment. */
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: enter\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit state-save map: segment registers at 0x7e00, 16 bytes each */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* general purpose registers */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* 32-bit state-save map */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    /* segment descriptors: CS/DS/ES at 0x7f84, FS/GS/SS at 0x7f2c */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    env->efer = 0;
    env->hflags &= ~HF_LMA_MASK;
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    /* SMM entry point is always smbase:0x8000 in a flat 4 GB CS */
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1498
/* RSM: resume from System Management Mode by reloading the CPU state
 * that do_smm_enter() saved in the SMRAM state-save area.  The offsets
 * mirror those in do_smm_enter() for the 64-bit and 32-bit maps. */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    /* restore EFER first so HF_LMA reflects the resumed mode */
    env->efer = ldq_phys(sm_state + 0x7ed0);
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
    else
        env->hflags &= ~HF_LMA_MASK;

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    /* bit 17 of the revision ID signals SMBASE relocation support */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    if (loglevel & CPU_LOG_INT) {
        fprintf(logfile, "SMM: after RSM\n");
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
    }
}
1622
74ce674f
FB
1623#endif /* !CONFIG_USER_ONLY */
1624
1625
b5b38f61
FB
1626/* division, flags are undefined */
1627
1628void helper_divb_AL(target_ulong t0)
1629{
1630 unsigned int num, den, q, r;
1631
1632 num = (EAX & 0xffff);
1633 den = (t0 & 0xff);
1634 if (den == 0) {
1635 raise_exception(EXCP00_DIVZ);
1636 }
1637 q = (num / den);
1638 if (q > 0xff)
1639 raise_exception(EXCP00_DIVZ);
1640 q &= 0xff;
1641 r = (num % den) & 0xff;
1642 EAX = (EAX & ~0xffff) | (r << 8) | q;
1643}
1644
1645void helper_idivb_AL(target_ulong t0)
1646{
1647 int num, den, q, r;
1648
1649 num = (int16_t)EAX;
1650 den = (int8_t)t0;
1651 if (den == 0) {
1652 raise_exception(EXCP00_DIVZ);
1653 }
1654 q = (num / den);
1655 if (q != (int8_t)q)
1656 raise_exception(EXCP00_DIVZ);
1657 q &= 0xff;
1658 r = (num % den) & 0xff;
1659 EAX = (EAX & ~0xffff) | (r << 8) | q;
1660}
1661
1662void helper_divw_AX(target_ulong t0)
1663{
1664 unsigned int num, den, q, r;
1665
1666 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1667 den = (t0 & 0xffff);
1668 if (den == 0) {
1669 raise_exception(EXCP00_DIVZ);
1670 }
1671 q = (num / den);
1672 if (q > 0xffff)
1673 raise_exception(EXCP00_DIVZ);
1674 q &= 0xffff;
1675 r = (num % den) & 0xffff;
1676 EAX = (EAX & ~0xffff) | q;
1677 EDX = (EDX & ~0xffff) | r;
1678}
1679
1680void helper_idivw_AX(target_ulong t0)
1681{
1682 int num, den, q, r;
1683
1684 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1685 den = (int16_t)t0;
1686 if (den == 0) {
1687 raise_exception(EXCP00_DIVZ);
1688 }
1689 q = (num / den);
1690 if (q != (int16_t)q)
1691 raise_exception(EXCP00_DIVZ);
1692 q &= 0xffff;
1693 r = (num % den) & 0xffff;
1694 EAX = (EAX & ~0xffff) | q;
1695 EDX = (EDX & ~0xffff) | r;
1696}
1697
1698void helper_divl_EAX(target_ulong t0)
2c0262af 1699{
45bbbb46
FB
1700 unsigned int den, r;
1701 uint64_t num, q;
3b46e624 1702
31313213 1703 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
57fec1fe 1704 den = t0;
2c0262af 1705 if (den == 0) {
2c0262af
FB
1706 raise_exception(EXCP00_DIVZ);
1707 }
2c0262af
FB
1708 q = (num / den);
1709 r = (num % den);
45bbbb46
FB
1710 if (q > 0xffffffff)
1711 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
1712 EAX = (uint32_t)q;
1713 EDX = (uint32_t)r;
2c0262af
FB
1714}
1715
b5b38f61 1716void helper_idivl_EAX(target_ulong t0)
2c0262af 1717{
45bbbb46
FB
1718 int den, r;
1719 int64_t num, q;
3b46e624 1720
31313213 1721 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
57fec1fe 1722 den = t0;
2c0262af 1723 if (den == 0) {
2c0262af
FB
1724 raise_exception(EXCP00_DIVZ);
1725 }
2c0262af
FB
1726 q = (num / den);
1727 r = (num % den);
45bbbb46
FB
1728 if (q != (int32_t)q)
1729 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
1730 EAX = (uint32_t)q;
1731 EDX = (uint32_t)r;
2c0262af
FB
1732}
1733
b5b38f61
FB
1734/* bcd */
1735
1736/* XXX: exception */
1737void helper_aam(int base)
1738{
1739 int al, ah;
1740 al = EAX & 0xff;
1741 ah = al / base;
1742 al = al % base;
1743 EAX = (EAX & ~0xffff) | al | (ah << 8);
1744 CC_DST = al;
1745}
1746
1747void helper_aad(int base)
1748{
1749 int al, ah;
1750 al = EAX & 0xff;
1751 ah = (EAX >> 8) & 0xff;
1752 al = ((ah * base) + al) & 0xff;
1753 EAX = (EAX & ~0xffff) | al;
1754 CC_DST = al;
1755}
1756
/* AAA: ASCII adjust AL after addition.  Adjusts AL/AH when AL's low
   nibble exceeds 9 or AF is set; updates AF and CF accordingly. */
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    /* materialize the lazily-computed flags */
    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    /* would the +6 adjustment carry out of AL? */
    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}
1781
/* AAS: ASCII adjust AL after subtraction.  Mirror image of AAA:
   subtracts the adjustment and borrows from AH. */
void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    /* materialize the lazily-computed flags */
    eflags = cc_table[CC_OP].compute_all();
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    /* would the -6 adjustment borrow out of AL? */
    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
    FORCE_RET();
}
1806
/* DAA: decimal adjust AL after addition (packed BCD). */
void helper_daa(void)
{
    int al, af, cf;
    int eflags;

    /* materialize the lazily-computed flags */
    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    /* adjust the low BCD digit */
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    /* adjust the high BCD digit */
    if ((al > 0x9f) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}
1834
/* DAS: decimal adjust AL after subtraction (packed BCD). */
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    /* materialize the lazily-computed flags */
    eflags = cc_table[CC_OP].compute_all();
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    /* al1 keeps the pre-adjustment value for the high-digit test */
    al1 = al;
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
    FORCE_RET();
}
1865
07be379f
FB
/* INTO: raise the #OF trap (vector 4) if the overflow flag is set. */
void helper_into(int next_eip_addend)
{
    int eflags;
    /* materialize the lazily-computed flags */
    eflags = cc_table[CC_OP].compute_all();
    if (eflags & CC_O) {
        raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
    }
}
1874
/* CMPXCHG8B: compare EDX:EAX with the 64-bit value at [a0]; if equal,
 * store ECX:EBX there and set ZF, else load the value into EDX:EAX and
 * clear ZF.  Note: not atomic with respect to other emulated CPUs. */
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
1892
1b9d9ebb
FB
1893#ifdef TARGET_X86_64
/* CMPXCHG16B: 128-bit analogue of CMPXCHG8B — compare RDX:RAX with the
 * two quadwords at [a0]; on match store RCX:RBX and set ZF, otherwise
 * load the memory value into RDX:RAX and clear ZF. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
1913#endif
1914
/* Deliver a single-step debug trap: set the DR6 single-step status bit
   and raise #DB (vector 1). */
void helper_single_step(void)
{
    env->dr[6] |= 0x4000; /* DR6.BS (single-step) status bit */
    raise_exception(EXCP01_SSTP);
}
1920
2c0262af
FB
/* CPUID: fill EAX/EBX/ECX/EDX according to the leaf selected by EAX.
 * Most data comes from the per-CPU cpuid_* fields configured at init;
 * cache-descriptor leaves return fixed values. */
void helper_cpuid(void)
{
    uint32_t index;
    index = (uint32_t)EAX;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        /* NOTE(review): out-of-range extended leaves fall back to the
           basic maximum leaf, as real CPUs do — confirm intended */
        if (index > env->cpuid_xlevel)
            index = env->cpuid_level;
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* vendor string and highest basic leaf */
        EAX = env->cpuid_level;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 1:
        /* family/model/stepping and feature flags */
        EAX = env->cpuid_version;
        EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        ECX = env->cpuid_ext_features;
        EDX = env->cpuid_features;
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 1;
        EBX = 0;
        ECX = 0;
        EDX = 0x2c307d;
        break;
    case 0x80000000:
        /* highest extended leaf plus vendor string */
        EAX = env->cpuid_xlevel;
        EBX = env->cpuid_vendor1;
        EDX = env->cpuid_vendor2;
        ECX = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* extended feature flags */
        EAX = env->cpuid_features;
        EBX = 0;
        ECX = env->cpuid_ext3_features;
        EDX = env->cpuid_ext2_features;
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* processor brand string, 16 bytes per leaf */
        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        EAX = 0x01ff01ff;
        EBX = 0x01ff01ff;
        ECX = 0x40020140;
        EDX = 0x40020140;
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        EAX = 0;
        EBX = 0x42004200;
        ECX = 0x02008140;
        EDX = 0;
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
/* XXX: This value must match the one used in the MMU code. */
#if defined(TARGET_X86_64)
# if defined(USE_KQEMU)
        EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
# else
/* XXX: The physical address space is limited to 42 bits in exec.c. */
        EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
# endif
#else
# if defined(USE_KQEMU)
        EAX = 0x00000020; /* 32 bits physical */
# else
        EAX = 0x00000024; /* 36 bits physical */
# endif
#endif
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    case 0x8000000A:
        /* SVM leaf */
        EAX = 0x00000001;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    default:
        /* reserved values: zero */
        EAX = 0;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
2025
b8b6a50b 2026void helper_enter_level(int level, int data32, target_ulong t1)
61a8c4ec 2027{
14ce26e7 2028 target_ulong ssp;
61a8c4ec
FB
2029 uint32_t esp_mask, esp, ebp;
2030
2031 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2032 ssp = env->segs[R_SS].base;
2033 ebp = EBP;
2034 esp = ESP;
2035 if (data32) {
2036 /* 32 bit */
2037 esp -= 4;
2038 while (--level) {
2039 esp -= 4;
2040 ebp -= 4;
2041 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2042 }
2043 esp -= 4;
b8b6a50b 2044 stl(ssp + (esp & esp_mask), t1);
61a8c4ec
FB
2045 } else {
2046 /* 16 bit */
2047 esp -= 2;
2048 while (--level) {
2049 esp -= 2;
2050 ebp -= 2;
2051 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2052 }
2053 esp -= 2;
b8b6a50b 2054 stw(ssp + (esp & esp_mask), t1);
61a8c4ec
FB
2055 }
2056}
2057
8f091a59 2058#ifdef TARGET_X86_64
b8b6a50b 2059void helper_enter64_level(int level, int data64, target_ulong t1)
8f091a59
FB
2060{
2061 target_ulong esp, ebp;
2062 ebp = EBP;
2063 esp = ESP;
2064
2065 if (data64) {
2066 /* 64 bit */
2067 esp -= 8;
2068 while (--level) {
2069 esp -= 8;
2070 ebp -= 8;
2071 stq(esp, ldq(ebp));
2072 }
2073 esp -= 8;
b8b6a50b 2074 stq(esp, t1);
8f091a59
FB
2075 } else {
2076 /* 16 bit */
2077 esp -= 2;
2078 while (--level) {
2079 esp -= 2;
2080 ebp -= 2;
2081 stw(esp, lduw(ebp));
2082 }
2083 esp -= 2;
b8b6a50b 2084 stw(esp, t1);
8f091a59
FB
2085 }
2086}
2087#endif
2088
b5b38f61 2089void helper_lldt(int selector)
2c0262af 2090{
2c0262af
FB
2091 SegmentCache *dt;
2092 uint32_t e1, e2;
14ce26e7
FB
2093 int index, entry_limit;
2094 target_ulong ptr;
3b46e624 2095
b5b38f61 2096 selector &= 0xffff;
2c0262af
FB
2097 if ((selector & 0xfffc) == 0) {
2098 /* XXX: NULL selector case: invalid LDT */
14ce26e7 2099 env->ldt.base = 0;
2c0262af
FB
2100 env->ldt.limit = 0;
2101 } else {
2102 if (selector & 0x4)
2103 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2104 dt = &env->gdt;
2105 index = selector & ~7;
14ce26e7
FB
2106#ifdef TARGET_X86_64
2107 if (env->hflags & HF_LMA_MASK)
2108 entry_limit = 15;
2109 else
3b46e624 2110#endif
14ce26e7
FB
2111 entry_limit = 7;
2112 if ((index + entry_limit) > dt->limit)
2c0262af
FB
2113 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2114 ptr = dt->base + index;
61382a50
FB
2115 e1 = ldl_kernel(ptr);
2116 e2 = ldl_kernel(ptr + 4);
2c0262af
FB
2117 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2118 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2119 if (!(e2 & DESC_P_MASK))
2120 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
14ce26e7
FB
2121#ifdef TARGET_X86_64
2122 if (env->hflags & HF_LMA_MASK) {
2123 uint32_t e3;
2124 e3 = ldl_kernel(ptr + 8);
2125 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2126 env->ldt.base |= (target_ulong)e3 << 32;
2127 } else
2128#endif
2129 {
2130 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2131 }
2c0262af
FB
2132 }
2133 env->ldt.selector = selector;
2134}
2135
b5b38f61 2136void helper_ltr(int selector)
2c0262af 2137{
2c0262af
FB
2138 SegmentCache *dt;
2139 uint32_t e1, e2;
14ce26e7
FB
2140 int index, type, entry_limit;
2141 target_ulong ptr;
3b46e624 2142
b5b38f61 2143 selector &= 0xffff;
2c0262af 2144 if ((selector & 0xfffc) == 0) {
14ce26e7
FB
2145 /* NULL selector case: invalid TR */
2146 env->tr.base = 0;
2c0262af
FB
2147 env->tr.limit = 0;
2148 env->tr.flags = 0;
2149 } else {
2150 if (selector & 0x4)
2151 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2152 dt = &env->gdt;
2153 index = selector & ~7;
14ce26e7
FB
2154#ifdef TARGET_X86_64
2155 if (env->hflags & HF_LMA_MASK)
2156 entry_limit = 15;
2157 else
3b46e624 2158#endif
14ce26e7
FB
2159 entry_limit = 7;
2160 if ((index + entry_limit) > dt->limit)
2c0262af
FB
2161 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2162 ptr = dt->base + index;
61382a50
FB
2163 e1 = ldl_kernel(ptr);
2164 e2 = ldl_kernel(ptr + 4);
2c0262af 2165 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
5fafdf24 2166 if ((e2 & DESC_S_MASK) ||
7e84c249 2167 (type != 1 && type != 9))
2c0262af
FB
2168 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2169 if (!(e2 & DESC_P_MASK))
2170 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
14ce26e7
FB
2171#ifdef TARGET_X86_64
2172 if (env->hflags & HF_LMA_MASK) {
b0ee3ff0 2173 uint32_t e3, e4;
14ce26e7 2174 e3 = ldl_kernel(ptr + 8);
b0ee3ff0
TS
2175 e4 = ldl_kernel(ptr + 12);
2176 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2177 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
14ce26e7
FB
2178 load_seg_cache_raw_dt(&env->tr, e1, e2);
2179 env->tr.base |= (target_ulong)e3 << 32;
5fafdf24 2180 } else
14ce26e7
FB
2181#endif
2182 {
2183 load_seg_cache_raw_dt(&env->tr, e1, e2);
2184 }
8e682019 2185 e2 |= DESC_TSS_BUSY_MASK;
61382a50 2186 stl_kernel(ptr + 4, e2);
2c0262af
FB
2187 }
2188 env->tr.selector = selector;
2189}
2190
3ab493de 2191/* only works if protected mode and not VM86. seg_reg must be != R_CS */
b5b38f61 2192void helper_load_seg(int seg_reg, int selector)
2c0262af
FB
2193{
2194 uint32_t e1, e2;
3ab493de
FB
2195 int cpl, dpl, rpl;
2196 SegmentCache *dt;
2197 int index;
14ce26e7 2198 target_ulong ptr;
3ab493de 2199
8e682019 2200 selector &= 0xffff;
b359d4e7 2201 cpl = env->hflags & HF_CPL_MASK;
2c0262af
FB
2202 if ((selector & 0xfffc) == 0) {
2203 /* null selector case */
4d6b6c0a
FB
2204 if (seg_reg == R_SS
2205#ifdef TARGET_X86_64
b359d4e7 2206 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
4d6b6c0a
FB
2207#endif
2208 )
2c0262af 2209 raise_exception_err(EXCP0D_GPF, 0);
14ce26e7 2210 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2c0262af 2211 } else {
3b46e624 2212
3ab493de
FB
2213 if (selector & 0x4)
2214 dt = &env->ldt;
2215 else
2216 dt = &env->gdt;
2217 index = selector & ~7;
8e682019 2218 if ((index + 7) > dt->limit)
2c0262af 2219 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3ab493de
FB
2220 ptr = dt->base + index;
2221 e1 = ldl_kernel(ptr);
2222 e2 = ldl_kernel(ptr + 4);
3b46e624 2223
8e682019 2224 if (!(e2 & DESC_S_MASK))
2c0262af 2225 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3ab493de
FB
2226 rpl = selector & 3;
2227 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2c0262af 2228 if (seg_reg == R_SS) {
3ab493de 2229 /* must be writable segment */
8e682019 2230 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2c0262af 2231 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
8e682019 2232 if (rpl != cpl || dpl != cpl)
3ab493de 2233 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2c0262af 2234 } else {
3ab493de 2235 /* must be readable segment */
8e682019 2236 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2c0262af 2237 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3b46e624 2238
3ab493de
FB
2239 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2240 /* if not conforming code, test rights */
5fafdf24 2241 if (dpl < cpl || dpl < rpl)
3ab493de 2242 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
3ab493de 2243 }
2c0262af
FB
2244 }
2245
2246 if (!(e2 & DESC_P_MASK)) {
2c0262af
FB
2247 if (seg_reg == R_SS)
2248 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2249 else
2250 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2251 }
3ab493de
FB
2252
2253 /* set the access bit if not already set */
2254 if (!(e2 & DESC_A_MASK)) {
2255 e2 |= DESC_A_MASK;
2256 stl_kernel(ptr + 4, e2);
2257 }
2258
5fafdf24 2259 cpu_x86_load_seg_cache(env, seg_reg, selector,
2c0262af
FB
2260 get_seg_base(e1, e2),
2261 get_seg_limit(e1, e2),
2262 e2);
2263#if 0
5fafdf24 2264 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2c0262af
FB
2265 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2266#endif
2267 }
2268}
2269
2270/* protected mode jump */
b8b6a50b
FB
2271void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2272 int next_eip_addend)
2c0262af 2273{
b8b6a50b 2274 int gate_cs, type;
2c0262af 2275 uint32_t e1, e2, cpl, dpl, rpl, limit;
b8b6a50b 2276 target_ulong next_eip;
3b46e624 2277
2c0262af
FB
2278 if ((new_cs & 0xfffc) == 0)
2279 raise_exception_err(EXCP0D_GPF, 0);
2280 if (load_segment(&e1, &e2, new_cs) != 0)
2281 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2282 cpl = env->hflags & HF_CPL_MASK;
2283 if (e2 & DESC_S_MASK) {
2284 if (!(e2 & DESC_CS_MASK))
2285 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2286 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 2287 if (e2 & DESC_C_MASK) {
2c0262af
FB
2288 /* conforming code segment */
2289 if (dpl > cpl)
2290 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2291 } else {
2292 /* non conforming code segment */
2293 rpl = new_cs & 3;
2294 if (rpl > cpl)
2295 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2296 if (dpl != cpl)
2297 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2298 }
2299 if (!(e2 & DESC_P_MASK))
2300 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2301 limit = get_seg_limit(e1, e2);
5fafdf24 2302 if (new_eip > limit &&
ca954f6d 2303 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2c0262af
FB
2304 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2305 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2306 get_seg_base(e1, e2), limit, e2);
2307 EIP = new_eip;
2308 } else {
7e84c249
FB
2309 /* jump to call or task gate */
2310 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2311 rpl = new_cs & 3;
2312 cpl = env->hflags & HF_CPL_MASK;
2313 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2314 switch(type) {
2315 case 1: /* 286 TSS */
2316 case 9: /* 386 TSS */
2317 case 5: /* task gate */
2318 if (dpl < cpl || dpl < rpl)
2319 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
f419b321 2320 next_eip = env->eip + next_eip_addend;
08cea4ee 2321 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
447c2cef 2322 CC_OP = CC_OP_EFLAGS;
7e84c249
FB
2323 break;
2324 case 4: /* 286 call gate */
2325 case 12: /* 386 call gate */
2326 if ((dpl < cpl) || (dpl < rpl))
2327 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2328 if (!(e2 & DESC_P_MASK))
2329 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2330 gate_cs = e1 >> 16;
516633dc
FB
2331 new_eip = (e1 & 0xffff);
2332 if (type == 12)
2333 new_eip |= (e2 & 0xffff0000);
7e84c249
FB
2334 if (load_segment(&e1, &e2, gate_cs) != 0)
2335 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2336 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2337 /* must be code segment */
5fafdf24 2338 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
7e84c249
FB
2339 (DESC_S_MASK | DESC_CS_MASK)))
2340 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
5fafdf24 2341 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
7e84c249
FB
2342 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2343 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2344 if (!(e2 & DESC_P_MASK))
2345 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
7e84c249
FB
2346 limit = get_seg_limit(e1, e2);
2347 if (new_eip > limit)
2348 raise_exception_err(EXCP0D_GPF, 0);
2349 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2350 get_seg_base(e1, e2), limit, e2);
2351 EIP = new_eip;
2352 break;
2353 default:
2354 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2355 break;
2356 }
2c0262af
FB
2357 }
2358}
2359
2360/* real mode call */
b8b6a50b
FB
2361void helper_lcall_real(int new_cs, target_ulong new_eip1,
2362 int shift, int next_eip)
2c0262af 2363{
b8b6a50b 2364 int new_eip;
2c0262af 2365 uint32_t esp, esp_mask;
14ce26e7 2366 target_ulong ssp;
2c0262af 2367
b8b6a50b 2368 new_eip = new_eip1;
2c0262af 2369 esp = ESP;
891b38e4 2370 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af
FB
2371 ssp = env->segs[R_SS].base;
2372 if (shift) {
891b38e4
FB
2373 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2374 PUSHL(ssp, esp, esp_mask, next_eip);
2c0262af 2375 } else {
891b38e4
FB
2376 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2377 PUSHW(ssp, esp, esp_mask, next_eip);
2c0262af
FB
2378 }
2379
8d7b0fbb 2380 SET_ESP(esp, esp_mask);
2c0262af
FB
2381 env->eip = new_eip;
2382 env->segs[R_CS].selector = new_cs;
14ce26e7 2383 env->segs[R_CS].base = (new_cs << 4);
2c0262af
FB
2384}
2385
2386/* protected mode call */
b8b6a50b
FB
2387void helper_lcall_protected(int new_cs, target_ulong new_eip,
2388 int shift, int next_eip_addend)
2c0262af 2389{
b8b6a50b 2390 int new_stack, i;
2c0262af 2391 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
891b38e4
FB
2392 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2393 uint32_t val, limit, old_sp_mask;
b8b6a50b 2394 target_ulong ssp, old_ssp, next_eip;
3b46e624 2395
f419b321 2396 next_eip = env->eip + next_eip_addend;
f3f2d9be 2397#ifdef DEBUG_PCALL
e19e89a5
FB
2398 if (loglevel & CPU_LOG_PCALL) {
2399 fprintf(logfile, "lcall %04x:%08x s=%d\n",
649ea05a 2400 new_cs, (uint32_t)new_eip, shift);
7fe48483 2401 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
f3f2d9be
FB
2402 }
2403#endif
2c0262af
FB
2404 if ((new_cs & 0xfffc) == 0)
2405 raise_exception_err(EXCP0D_GPF, 0);
2406 if (load_segment(&e1, &e2, new_cs) != 0)
2407 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2408 cpl = env->hflags & HF_CPL_MASK;
f3f2d9be 2409#ifdef DEBUG_PCALL
e19e89a5 2410 if (loglevel & CPU_LOG_PCALL) {
f3f2d9be
FB
2411 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2412 }
2413#endif
2c0262af
FB
2414 if (e2 & DESC_S_MASK) {
2415 if (!(e2 & DESC_CS_MASK))
2416 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2417 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 2418 if (e2 & DESC_C_MASK) {
2c0262af
FB
2419 /* conforming code segment */
2420 if (dpl > cpl)
2421 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2422 } else {
2423 /* non conforming code segment */
2424 rpl = new_cs & 3;
2425 if (rpl > cpl)
2426 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2427 if (dpl != cpl)
2428 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2429 }
2430 if (!(e2 & DESC_P_MASK))
2431 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2432
f419b321
FB
2433#ifdef TARGET_X86_64
2434 /* XXX: check 16/32 bit cases in long mode */
2435 if (shift == 2) {
2436 target_ulong rsp;
2437 /* 64 bit case */
2438 rsp = ESP;
2439 PUSHQ(rsp, env->segs[R_CS].selector);
2440 PUSHQ(rsp, next_eip);
2441 /* from this point, not restartable */
2442 ESP = rsp;
2443 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
5fafdf24 2444 get_seg_base(e1, e2),
f419b321
FB
2445 get_seg_limit(e1, e2), e2);
2446 EIP = new_eip;
5fafdf24 2447 } else
f419b321
FB
2448#endif
2449 {
2450 sp = ESP;
2451 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2452 ssp = env->segs[R_SS].base;
2453 if (shift) {
2454 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2455 PUSHL(ssp, sp, sp_mask, next_eip);
2456 } else {
2457 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2458 PUSHW(ssp, sp, sp_mask, next_eip);
2459 }
3b46e624 2460
f419b321
FB
2461 limit = get_seg_limit(e1, e2);
2462 if (new_eip > limit)
2463 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2464 /* from this point, not restartable */
8d7b0fbb 2465 SET_ESP(sp, sp_mask);
f419b321
FB
2466 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2467 get_seg_base(e1, e2), limit, e2);
2468 EIP = new_eip;
2c0262af 2469 }
2c0262af
FB
2470 } else {
2471 /* check gate type */
2472 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
7e84c249
FB
2473 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2474 rpl = new_cs & 3;
2c0262af
FB
2475 switch(type) {
2476 case 1: /* available 286 TSS */
2477 case 9: /* available 386 TSS */
2478 case 5: /* task gate */
7e84c249
FB
2479 if (dpl < cpl || dpl < rpl)
2480 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
883da8e2 2481 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
447c2cef 2482 CC_OP = CC_OP_EFLAGS;
8145122b 2483 return;
2c0262af
FB
2484 case 4: /* 286 call gate */
2485 case 12: /* 386 call gate */
2486 break;
2487 default:
2488 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2489 break;
2490 }
2491 shift = type >> 3;
2492
2c0262af
FB
2493 if (dpl < cpl || dpl < rpl)
2494 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2495 /* check valid bit */
2496 if (!(e2 & DESC_P_MASK))
2497 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2498 selector = e1 >> 16;
2499 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
f3f2d9be 2500 param_count = e2 & 0x1f;
2c0262af
FB
2501 if ((selector & 0xfffc) == 0)
2502 raise_exception_err(EXCP0D_GPF, 0);
2503
2504 if (load_segment(&e1, &e2, selector) != 0)
2505 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2506 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2507 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2508 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2509 if (dpl > cpl)
2510 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2511 if (!(e2 & DESC_P_MASK))
2512 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2513
2514 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
7f75ffd3 2515 /* to inner privilege */
2c0262af 2516 get_ss_esp_from_tss(&ss, &sp, dpl);
f3f2d9be 2517#ifdef DEBUG_PCALL
e19e89a5 2518 if (loglevel & CPU_LOG_PCALL)
5fafdf24 2519 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
f3f2d9be
FB
2520 ss, sp, param_count, ESP);
2521#endif
2c0262af
FB
2522 if ((ss & 0xfffc) == 0)
2523 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2524 if ((ss & 3) != dpl)
2525 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2526 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2527 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2528 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2529 if (ss_dpl != dpl)
2530 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2531 if (!(ss_e2 & DESC_S_MASK) ||
2532 (ss_e2 & DESC_CS_MASK) ||
2533 !(ss_e2 & DESC_W_MASK))
2534 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2535 if (!(ss_e2 & DESC_P_MASK))
2536 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
3b46e624 2537
891b38e4 2538 // push_size = ((param_count * 2) + 8) << shift;
2c0262af 2539
891b38e4
FB
2540 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2541 old_ssp = env->segs[R_SS].base;
3b46e624 2542
891b38e4
FB
2543 sp_mask = get_sp_mask(ss_e2);
2544 ssp = get_seg_base(ss_e1, ss_e2);
2c0262af 2545 if (shift) {
891b38e4
FB
2546 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2547 PUSHL(ssp, sp, sp_mask, ESP);
2548 for(i = param_count - 1; i >= 0; i--) {
2549 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2550 PUSHL(ssp, sp, sp_mask, val);
2c0262af
FB
2551 }
2552 } else {
891b38e4
FB
2553 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2554 PUSHW(ssp, sp, sp_mask, ESP);
2555 for(i = param_count - 1; i >= 0; i--) {
2556 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2557 PUSHW(ssp, sp, sp_mask, val);
2c0262af
FB
2558 }
2559 }
891b38e4 2560 new_stack = 1;
2c0262af 2561 } else {
7f75ffd3 2562 /* to same privilege */
891b38e4
FB
2563 sp = ESP;
2564 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2565 ssp = env->segs[R_SS].base;
2566 // push_size = (4 << shift);
2567 new_stack = 0;
2c0262af
FB
2568 }
2569
2570 if (shift) {
891b38e4
FB
2571 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2572 PUSHL(ssp, sp, sp_mask, next_eip);
2c0262af 2573 } else {
891b38e4
FB
2574 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2575 PUSHW(ssp, sp, sp_mask, next_eip);
2576 }
2577
2578 /* from this point, not restartable */
2579
2580 if (new_stack) {
2581 ss = (ss & ~3) | dpl;
5fafdf24 2582 cpu_x86_load_seg_cache(env, R_SS, ss,
891b38e4
FB
2583 ssp,
2584 get_seg_limit(ss_e1, ss_e2),
2585 ss_e2);
2c0262af
FB
2586 }
2587
2c0262af 2588 selector = (selector & ~3) | dpl;
5fafdf24 2589 cpu_x86_load_seg_cache(env, R_CS, selector,
2c0262af
FB
2590 get_seg_base(e1, e2),
2591 get_seg_limit(e1, e2),
2592 e2);
2593 cpu_x86_set_cpl(env, dpl);
8d7b0fbb 2594 SET_ESP(sp, sp_mask);
2c0262af
FB
2595 EIP = offset;
2596 }
9df217a3
FB
2597#ifdef USE_KQEMU
2598 if (kqemu_is_ok(env)) {
2599 env->exception_index = -1;
2600 cpu_loop_exit();
2601 }
2602#endif
2c0262af
FB
2603}
2604
7e84c249 2605/* real and vm86 mode iret */
2c0262af
FB
2606void helper_iret_real(int shift)
2607{
891b38e4 2608 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
14ce26e7 2609 target_ulong ssp;
2c0262af 2610 int eflags_mask;
7e84c249 2611
891b38e4
FB
2612 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2613 sp = ESP;
2614 ssp = env->segs[R_SS].base;
2c0262af
FB
2615 if (shift == 1) {
2616 /* 32 bits */
891b38e4
FB
2617 POPL(ssp, sp, sp_mask, new_eip);
2618 POPL(ssp, sp, sp_mask, new_cs);
2619 new_cs &= 0xffff;
2620 POPL(ssp, sp, sp_mask, new_eflags);
2c0262af
FB
2621 } else {
2622 /* 16 bits */
891b38e4
FB
2623 POPW(ssp, sp, sp_mask, new_eip);
2624 POPW(ssp, sp, sp_mask, new_cs);
2625 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 2626 }
4136f33c 2627 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2c0262af
FB
2628 load_seg_vm(R_CS, new_cs);
2629 env->eip = new_eip;
7e84c249 2630 if (env->eflags & VM_MASK)
8145122b 2631 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
7e84c249 2632 else
8145122b 2633 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2c0262af
FB
2634 if (shift == 0)
2635 eflags_mask &= 0xffff;
2636 load_eflags(new_eflags, eflags_mask);
474ea849 2637 env->hflags &= ~HF_NMI_MASK;
2c0262af
FB
2638}
2639
8e682019
FB
2640static inline void validate_seg(int seg_reg, int cpl)
2641{
2642 int dpl;
2643 uint32_t e2;
cd072e01
FB
2644
2645 /* XXX: on x86_64, we do not want to nullify FS and GS because
2646 they may still contain a valid base. I would be interested to
2647 know how a real x86_64 CPU behaves */
5fafdf24 2648 if ((seg_reg == R_FS || seg_reg == R_GS) &&
cd072e01
FB
2649 (env->segs[seg_reg].selector & 0xfffc) == 0)
2650 return;
2651
8e682019
FB
2652 e2 = env->segs[seg_reg].flags;
2653 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2654 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2655 /* data or non conforming code segment */
2656 if (dpl < cpl) {
14ce26e7 2657 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
8e682019
FB
2658 }
2659 }
2660}
2661
2c0262af
FB
2662/* protected mode iret */
2663static inline void helper_ret_protected(int shift, int is_iret, int addend)
2664{
14ce26e7 2665 uint32_t new_cs, new_eflags, new_ss;
2c0262af
FB
2666 uint32_t new_es, new_ds, new_fs, new_gs;
2667 uint32_t e1, e2, ss_e1, ss_e2;
4136f33c 2668 int cpl, dpl, rpl, eflags_mask, iopl;
14ce26e7 2669 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
3b46e624 2670
14ce26e7
FB
2671#ifdef TARGET_X86_64
2672 if (shift == 2)
2673 sp_mask = -1;
2674 else
2675#endif
2676 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2c0262af 2677 sp = ESP;
891b38e4 2678 ssp = env->segs[R_SS].base;
354ff226 2679 new_eflags = 0; /* avoid warning */
14ce26e7
FB
2680#ifdef TARGET_X86_64
2681 if (shift == 2) {
2682 POPQ(sp, new_eip);
2683 POPQ(sp, new_cs);
2684 new_cs &= 0xffff;
2685 if (is_iret) {
2686 POPQ(sp, new_eflags);
2687 }
2688 } else
2689#endif
2c0262af
FB
2690 if (shift == 1) {
2691 /* 32 bits */
891b38e4
FB
2692 POPL(ssp, sp, sp_mask, new_eip);
2693 POPL(ssp, sp, sp_mask, new_cs);
2694 new_cs &= 0xffff;
2695 if (is_iret) {
2696 POPL(ssp, sp, sp_mask, new_eflags);
2697 if (new_eflags & VM_MASK)
2698 goto return_to_vm86;
2699 }
2c0262af
FB
2700 } else {
2701 /* 16 bits */
891b38e4
FB
2702 POPW(ssp, sp, sp_mask, new_eip);
2703 POPW(ssp, sp, sp_mask, new_cs);
2c0262af 2704 if (is_iret)
891b38e4 2705 POPW(ssp, sp, sp_mask, new_eflags);
2c0262af 2706 }
891b38e4 2707#ifdef DEBUG_PCALL
e19e89a5 2708 if (loglevel & CPU_LOG_PCALL) {
14ce26e7 2709 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
e19e89a5 2710 new_cs, new_eip, shift, addend);
7fe48483 2711 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
891b38e4
FB
2712 }
2713#endif
2c0262af
FB
2714 if ((new_cs & 0xfffc) == 0)
2715 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2716 if (load_segment(&e1, &e2, new_cs) != 0)
2717 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2718 if (!(e2 & DESC_S_MASK) ||
2719 !(e2 & DESC_CS_MASK))
2720 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2721 cpl = env->hflags & HF_CPL_MASK;
5fafdf24 2722 rpl = new_cs & 3;
2c0262af
FB
2723 if (rpl < cpl)
2724 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2725 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
7e84c249 2726 if (e2 & DESC_C_MASK) {
2c0262af
FB
2727 if (dpl > rpl)
2728 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2729 } else {
2730 if (dpl != rpl)
2731 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2732 }
2733 if (!(e2 & DESC_P_MASK))
2734 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
3b46e624 2735
891b38e4 2736 sp += addend;
5fafdf24 2737 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
ca954f6d 2738 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2c0262af 2739 /* return to same priledge level */
5fafdf24 2740 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2c0262af
FB
2741 get_seg_base(e1, e2),
2742 get_seg_limit(e1, e2),
2743 e2);
2c0262af 2744 } else {
7f75ffd3 2745 /* return to different privilege level */
14ce26e7
FB
2746#ifdef TARGET_X86_64
2747 if (shift == 2) {
2748 POPQ(sp, new_esp);
2749 POPQ(sp, new_ss);
2750 new_ss &= 0xffff;
2751 } else
2752#endif
2c0262af
FB
2753 if (shift == 1) {
2754 /* 32 bits */
891b38e4
FB
2755 POPL(ssp, sp, sp_mask, new_esp);
2756 POPL(ssp, sp, sp_mask, new_ss);
2757 new_ss &= 0xffff;
2c0262af
FB
2758 } else {
2759 /* 16 bits */
891b38e4
FB
2760 POPW(ssp, sp, sp_mask, new_esp);
2761 POPW(ssp, sp, sp_mask, new_ss);
2c0262af 2762 }
e19e89a5
FB
2763#ifdef DEBUG_PCALL
2764 if (loglevel & CPU_LOG_PCALL) {
14ce26e7 2765 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
e19e89a5
FB
2766 new_ss, new_esp);
2767 }
2768#endif
b359d4e7
FB
2769 if ((new_ss & 0xfffc) == 0) {
2770#ifdef TARGET_X86_64
2771 /* NULL ss is allowed in long mode if cpl != 3*/
d80c7d1c 2772 /* XXX: test CS64 ? */
b359d4e7 2773 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
5fafdf24 2774 cpu_x86_load_seg_cache(env, R_SS, new_ss,
b359d4e7
FB
2775 0, 0xffffffff,
2776 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2777 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2778 DESC_W_MASK | DESC_A_MASK);
d80c7d1c 2779 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
5fafdf24 2780 } else
b359d4e7
FB
2781#endif
2782 {
2783 raise_exception_err(EXCP0D_GPF, 0);
2784 }
14ce26e7
FB
2785 } else {
2786 if ((new_ss & 3) != rpl)
2787 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2788 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2789 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2790 if (!(ss_e2 & DESC_S_MASK) ||
2791 (ss_e2 & DESC_CS_MASK) ||
2792 !(ss_e2 & DESC_W_MASK))
2793 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2794 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2795 if (dpl != rpl)
2796 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2797 if (!(ss_e2 & DESC_P_MASK))
2798 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
5fafdf24 2799 cpu_x86_load_seg_cache(env, R_SS, new_ss,
14ce26e7
FB
2800 get_seg_base(ss_e1, ss_e2),
2801 get_seg_limit(ss_e1, ss_e2),
2802 ss_e2);
2803 }
2c0262af 2804
5fafdf24 2805 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2c0262af
FB
2806 get_seg_base(e1, e2),
2807 get_seg_limit(e1, e2),
2808 e2);
2c0262af 2809 cpu_x86_set_cpl(env, rpl);
891b38e4 2810 sp = new_esp;
14ce26e7 2811#ifdef TARGET_X86_64
2c8e0301 2812 if (env->hflags & HF_CS64_MASK)
14ce26e7
FB
2813 sp_mask = -1;
2814 else
2815#endif
2816 sp_mask = get_sp_mask(ss_e2);
8e682019
FB
2817
2818 /* validate data segments */
89984cd2
FB
2819 validate_seg(R_ES, rpl);
2820 validate_seg(R_DS, rpl);
2821 validate_seg(R_FS, rpl);
2822 validate_seg(R_GS, rpl);
4afa6482
FB
2823
2824 sp += addend;
2c0262af 2825 }
8d7b0fbb 2826 SET_ESP(sp, sp_mask);
2c0262af
FB
2827 env->eip = new_eip;
2828 if (is_iret) {
4136f33c 2829 /* NOTE: 'cpl' is the _old_ CPL */
8145122b 2830 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2c0262af 2831 if (cpl == 0)
4136f33c
FB
2832 eflags_mask |= IOPL_MASK;
2833 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2834 if (cpl <= iopl)
2835 eflags_mask |= IF_MASK;
2c0262af
FB
2836 if (shift == 0)
2837 eflags_mask &= 0xffff;
2838 load_eflags(new_eflags, eflags_mask);
2839 }
2840 return;
2841
2842 return_to_vm86:
891b38e4
FB
2843 POPL(ssp, sp, sp_mask, new_esp);
2844 POPL(ssp, sp, sp_mask, new_ss);
2845 POPL(ssp, sp, sp_mask, new_es);
2846 POPL(ssp, sp, sp_mask, new_ds);
2847 POPL(ssp, sp, sp_mask, new_fs);
2848 POPL(ssp, sp, sp_mask, new_gs);
3b46e624 2849
2c0262af 2850 /* modify processor state */
5fafdf24 2851 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
8145122b 2852 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
891b38e4 2853 load_seg_vm(R_CS, new_cs & 0xffff);
2c0262af 2854 cpu_x86_set_cpl(env, 3);
891b38e4
FB
2855 load_seg_vm(R_SS, new_ss & 0xffff);
2856 load_seg_vm(R_ES, new_es & 0xffff);
2857 load_seg_vm(R_DS, new_ds & 0xffff);
2858 load_seg_vm(R_FS, new_fs & 0xffff);
2859 load_seg_vm(R_GS, new_gs & 0xffff);
2c0262af 2860
fd836909 2861 env->eip = new_eip & 0xffff;
2c0262af
FB
2862 ESP = new_esp;
2863}
2864
08cea4ee 2865void helper_iret_protected(int shift, int next_eip)
2c0262af 2866{
7e84c249
FB
2867 int tss_selector, type;
2868 uint32_t e1, e2;
3b46e624 2869
7e84c249
FB
2870 /* specific case for TSS */
2871 if (env->eflags & NT_MASK) {
14ce26e7
FB
2872#ifdef TARGET_X86_64
2873 if (env->hflags & HF_LMA_MASK)
2874 raise_exception_err(EXCP0D_GPF, 0);
2875#endif
7e84c249
FB
2876 tss_selector = lduw_kernel(env->tr.base + 0);
2877 if (tss_selector & 4)
2878 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2879 if (load_segment(&e1, &e2, tss_selector) != 0)
2880 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2881 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2882 /* NOTE: we check both segment and busy TSS */
2883 if (type != 3)
2884 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
08cea4ee 2885 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
7e84c249
FB
2886 } else {
2887 helper_ret_protected(shift, 1, 0);
2888 }
474ea849 2889 env->hflags &= ~HF_NMI_MASK;
9df217a3
FB
2890#ifdef USE_KQEMU
2891 if (kqemu_is_ok(env)) {
2892 CC_OP = CC_OP_EFLAGS;
2893 env->exception_index = -1;
2894 cpu_loop_exit();
2895 }
2896#endif
2c0262af
FB
2897}
2898
/* Far RET in protected mode: thin wrapper over helper_ret_protected
   without the EFLAGS restore. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef USE_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2909
023fe10d
FB
2910void helper_sysenter(void)
2911{
2912 if (env->sysenter_cs == 0) {
2913 raise_exception_err(EXCP0D_GPF, 0);
2914 }
2915 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2916 cpu_x86_set_cpl(env, 0);
5fafdf24
TS
2917 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2918 0, 0xffffffff,
023fe10d
FB
2919 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2920 DESC_S_MASK |
2921 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
5fafdf24 2922 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
14ce26e7 2923 0, 0xffffffff,
023fe10d
FB
2924 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2925 DESC_S_MASK |
2926 DESC_W_MASK | DESC_A_MASK);
2927 ESP = env->sysenter_esp;
2928 EIP = env->sysenter_eip;
2929}
2930
2931void helper_sysexit(void)
2932{
2933 int cpl;
2934
2935 cpl = env->hflags & HF_CPL_MASK;
2936 if (env->sysenter_cs == 0 || cpl != 0) {
2937 raise_exception_err(EXCP0D_GPF, 0);
2938 }
2939 cpu_x86_set_cpl(env, 3);
5fafdf24
TS
2940 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2941 0, 0xffffffff,
023fe10d
FB
2942 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2943 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2944 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
5fafdf24 2945 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
14ce26e7 2946 0, 0xffffffff,
023fe10d
FB
2947 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2948 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2949 DESC_W_MASK | DESC_A_MASK);
2950 ESP = ECX;
2951 EIP = EDX;
9df217a3
FB
2952#ifdef USE_KQEMU
2953 if (kqemu_is_ok(env)) {
2954 env->exception_index = -1;
2955 cpu_loop_exit();
2956 }
2957#endif
023fe10d
FB
2958}
2959
/* MOV CRn, reg: write 't0' into control register 'reg'.  CR0/CR3/CR4
   writes go through the cpu_x86_update_* hooks (TLB flushes etc.);
   CR8 is the APIC task-priority register.  Compiled out for user-mode
   emulation, where control registers are not reachable. */
void helper_movl_crN_T0(int reg, target_ulong t0)
{
#if !defined(CONFIG_USER_ONLY)
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        /* CR8 mirrors the APIC TPR; keep both in sync */
        cpu_set_apic_tpr(env, t0);
        env->cr[8] = t0;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
#endif
}
2983
b8b6a50b
FB
2984void helper_lmsw(target_ulong t0)
2985{
2986 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2987 if already set to one. */
2988 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2989 helper_movl_crN_T0(0, t0);
2990}
2991
/* CLTS: clear the task-switched flag in CR0 and its hflags shadow. */
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
2997
#if !defined(CONFIG_USER_ONLY)
/* Read CR8: the value is derived from the APIC task-priority register. */
target_ulong helper_movtl_T0_cr8(void)
{
    return cpu_get_apic_tpr(env);
}
#endif
3004
/* XXX: do more */
/* MOV DRn, reg: store the raw value; hardware breakpoint side effects
   of DR0-DR3/DR7 writes are not emulated yet (see XXX above). */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    env->dr[reg] = t0;
}
3010
/* INVLPG: invalidate the TLB entry covering linear address 'addr'. */
void helper_invlpg(target_ulong addr)
{
    cpu_x86_flush_tlb(env, addr);
}
3015
2c0262af
FB
3016void helper_rdtsc(void)
3017{
3018 uint64_t val;
ecada8a2
FB
3019
3020 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3021 raise_exception(EXCP0D_GPF);
3022 }
28ab0e2e 3023 val = cpu_get_tsc(env);
14ce26e7
FB
3024 EAX = (uint32_t)(val);
3025 EDX = (uint32_t)(val >> 32);
3026}
3027
df01e0fc
AZ
/* RDPMC: read performance-monitoring counter.  #GP when CR4.PCE gates
   it off outside CPL 0; otherwise reported to SVM and then #UD since
   the counters themselves are not implemented. */
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }

    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
3039
5fafdf24 3040#if defined(CONFIG_USER_ONLY)
14ce26e7
FB
3041void helper_wrmsr(void)
3042{
2c0262af
FB
3043}
3044
14ce26e7
FB
3045void helper_rdmsr(void)
3046{
3047}
3048#else
2c0262af
FB
3049void helper_wrmsr(void)
3050{
14ce26e7
FB
3051 uint64_t val;
3052
3053 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3054
3055 switch((uint32_t)ECX) {
2c0262af 3056 case MSR_IA32_SYSENTER_CS:
14ce26e7 3057 env->sysenter_cs = val & 0xffff;
2c0262af
FB
3058 break;
3059 case MSR_IA32_SYSENTER_ESP:
14ce26e7 3060 env->sysenter_esp = val;
2c0262af
FB
3061 break;
3062 case MSR_IA32_SYSENTER_EIP:
14ce26e7
FB
3063 env->sysenter_eip = val;
3064 break;
3065 case MSR_IA32_APICBASE:
3066 cpu_set_apic_base(env, val);
3067 break;
14ce26e7 3068 case MSR_EFER:
f419b321
FB
3069 {
3070 uint64_t update_mask;
3071 update_mask = 0;
3072 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3073 update_mask |= MSR_EFER_SCE;
3074 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3075 update_mask |= MSR_EFER_LME;
3076 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3077 update_mask |= MSR_EFER_FFXSR;
3078 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3079 update_mask |= MSR_EFER_NXE;
5fafdf24 3080 env->efer = (env->efer & ~update_mask) |
f419b321
FB
3081 (val & update_mask);
3082 }
2c0262af 3083 break;
14ce26e7
FB
3084 case MSR_STAR:
3085 env->star = val;
3086 break;
8f091a59
FB
3087 case MSR_PAT:
3088 env->pat = val;
3089 break;
0573fbfc
TS
3090 case MSR_VM_HSAVE_PA:
3091 env->vm_hsave = val;
3092 break;
f419b321 3093#ifdef TARGET_X86_64
14ce26e7
FB
3094 case MSR_LSTAR:
3095 env->lstar = val;
3096 break;
3097 case MSR_CSTAR:
3098 env->cstar = val;
3099 break;
3100 case MSR_FMASK:
3101 env->fmask = val;
3102 break;
3103 case MSR_FSBASE:
3104 env->segs[R_FS].base = val;
3105 break;
3106 case MSR_GSBASE:
3107 env->segs[R_GS].base = val;
3108 break;
3109 case MSR_KERNELGSBASE:
3110 env->kernelgsbase = val;
3111 break;
3112#endif
2c0262af
FB
3113 default:
3114 /* XXX: exception ? */
5fafdf24 3115 break;
2c0262af
FB
3116 }
3117}
3118
3119void helper_rdmsr(void)
3120{
14ce26e7
FB
3121 uint64_t val;
3122 switch((uint32_t)ECX) {
2c0262af 3123 case MSR_IA32_SYSENTER_CS:
14ce26e7 3124 val = env->sysenter_cs;
2c0262af
FB
3125 break;
3126 case MSR_IA32_SYSENTER_ESP:
14ce26e7 3127 val = env->sysenter_esp;
2c0262af
FB
3128 break;
3129 case MSR_IA32_SYSENTER_EIP:
14ce26e7
FB
3130 val = env->sysenter_eip;
3131 break;
3132 case MSR_IA32_APICBASE:
3133 val = cpu_get_apic_base(env);
3134 break;
14ce26e7
FB
3135 case MSR_EFER:
3136 val = env->efer;
3137 break;
3138 case MSR_STAR:
3139 val = env->star;
3140 break;
8f091a59
FB
3141 case MSR_PAT:
3142 val = env->pat;
3143 break;
0573fbfc
TS
3144 case MSR_VM_HSAVE_PA:
3145 val = env->vm_hsave;
3146 break;
f419b321 3147#ifdef TARGET_X86_64
14ce26e7
FB
3148 case MSR_LSTAR:
3149 val = env->lstar;
3150 break;
3151 case MSR_CSTAR:
3152 val = env->cstar;
3153 break;
3154 case MSR_FMASK:
3155 val = env->fmask;
3156 break;
3157 case MSR_FSBASE:
3158 val = env->segs[R_FS].base;
3159 break;
3160 case MSR_GSBASE:
3161 val = env->segs[R_GS].base;
2c0262af 3162 break;
14ce26e7
FB
3163 case MSR_KERNELGSBASE:
3164 val = env->kernelgsbase;
3165 break;
3166#endif
2c0262af
FB
3167 default:
3168 /* XXX: exception ? */
14ce26e7 3169 val = 0;
5fafdf24 3170 break;
2c0262af 3171 }
14ce26e7
FB
3172 EAX = (uint32_t)(val);
3173 EDX = (uint32_t)(val >> 32);
2c0262af 3174}
14ce26e7 3175#endif
2c0262af 3176
cec6843e 3177target_ulong helper_lsl(target_ulong selector1)
2c0262af 3178{
b5b38f61 3179 unsigned int limit;
cec6843e 3180 uint32_t e1, e2, eflags, selector;
3ab493de 3181 int rpl, dpl, cpl, type;
2c0262af 3182
cec6843e 3183 selector = selector1 & 0xffff;
5516d670 3184 eflags = cc_table[CC_OP].compute_all();
2c0262af 3185 if (load_segment(&e1, &e2, selector) != 0)
5516d670 3186 goto fail;
3ab493de
FB
3187 rpl = selector & 3;
3188 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3189 cpl = env->hflags & HF_CPL_MASK;
3190 if (e2 & DESC_S_MASK) {
3191 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3192 /* conforming */
3193 } else {
3194 if (dpl < cpl || dpl < rpl)
5516d670 3195 goto fail;
3ab493de
FB
3196 }
3197 } else {
3198 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3199 switch(type) {
3200 case 1:
3201 case 2:
3202 case 3:
3203 case 9:
3204 case 11:
3205 break;
3206 default:
5516d670 3207 goto fail;
3ab493de 3208 }
5516d670
FB
3209 if (dpl < cpl || dpl < rpl) {
3210 fail:
3211 CC_SRC = eflags & ~CC_Z;
b8b6a50b 3212 return 0;
5516d670 3213 }
3ab493de
FB
3214 }
3215 limit = get_seg_limit(e1, e2);
5516d670 3216 CC_SRC = eflags | CC_Z;
b8b6a50b 3217 return limit;
2c0262af
FB
3218}
3219
cec6843e 3220target_ulong helper_lar(target_ulong selector1)
2c0262af 3221{
cec6843e 3222 uint32_t e1, e2, eflags, selector;
3ab493de 3223 int rpl, dpl, cpl, type;
2c0262af 3224
cec6843e 3225 selector = selector1 & 0xffff;
5516d670 3226 eflags = cc_table[CC_OP].compute_all();
3ab493de 3227 if ((selector & 0xfffc) == 0)
5516d670 3228 goto fail;
2c0262af 3229 if (load_segment(&e1, &e2, selector) != 0)
5516d670 3230 goto fail;
3ab493de
FB
3231 rpl = selector & 3;
3232 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3233 cpl = env->hflags & HF_CPL_MASK;
3234 if (e2 & DESC_S_MASK) {
3235 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3236 /* conforming */
3237 } else {
3238 if (dpl < cpl || dpl < rpl)
5516d670 3239 goto fail;
3ab493de
FB
3240 }
3241 } else {
3242 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3243 switch(type) {
3244 case 1:
3245 case 2:
3246 case 3:
3247 case 4:
3248 case 5:
3249 case 9:
3250 case 11:
3251 case 12:
3252 break;
3253 default:
5516d670 3254 goto fail;
3ab493de 3255 }
5516d670
FB
3256 if (dpl < cpl || dpl < rpl) {
3257 fail:
3258 CC_SRC = eflags & ~CC_Z;
b8b6a50b 3259 return 0;
5516d670 3260 }
3ab493de 3261 }
5516d670 3262 CC_SRC = eflags | CC_Z;
b8b6a50b 3263 return e2 & 0x00f0ff00;
2c0262af
FB
3264}
3265
cec6843e 3266void helper_verr(target_ulong selector1)
3ab493de 3267{
cec6843e 3268 uint32_t e1, e2, eflags, selector;
3ab493de
FB
3269 int rpl, dpl, cpl;
3270
cec6843e 3271 selector = selector1 & 0xffff;
5516d670 3272 eflags = cc_table[CC_OP].compute_all();
3ab493de 3273 if ((selector & 0xfffc) == 0)
5516d670 3274 goto fail;
3ab493de 3275 if (load_segment(&e1, &e2, selector) != 0)
5516d670 3276 goto fail;
3ab493de 3277 if (!(e2 & DESC_S_MASK))
5516d670 3278 goto fail;
3ab493de
FB
3279 rpl = selector & 3;
3280 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3281 cpl = env->hflags & HF_CPL_MASK;
3282 if (e2 & DESC_CS_MASK) {
3283 if (!(e2 & DESC_R_MASK))
5516d670 3284 goto fail;
3ab493de
FB
3285 if (!(e2 & DESC_C_MASK)) {
3286 if (dpl < cpl || dpl < rpl)
5516d670 3287 goto fail;
3ab493de
FB
3288 }
3289 } else {
5516d670
FB
3290 if (dpl < cpl || dpl < rpl) {
3291 fail:
3292 CC_SRC = eflags & ~CC_Z;
3ab493de 3293 return;
5516d670 3294 }
3ab493de 3295 }
5516d670 3296 CC_SRC = eflags | CC_Z;
3ab493de
FB
3297}
3298
cec6843e 3299void helper_verw(target_ulong selector1)
3ab493de 3300{
cec6843e 3301 uint32_t e1, e2, eflags, selector;
3ab493de
FB
3302 int rpl, dpl, cpl;
3303
cec6843e 3304 selector = selector1 & 0xffff;
5516d670 3305 eflags = cc_table[CC_OP].compute_all();
3ab493de 3306 if ((selector & 0xfffc) == 0)
5516d670 3307 goto fail;
3ab493de 3308 if (load_segment(&e1, &e2, selector) != 0)
5516d670 3309 goto fail;
3ab493de 3310 if (!(e2 & DESC_S_MASK))
5516d670 3311 goto fail;
3ab493de
FB
3312 rpl = selector & 3;
3313 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3314 cpl = env->hflags & HF_CPL_MASK;
3315 if (e2 & DESC_CS_MASK) {
5516d670 3316 goto fail;
3ab493de
FB
3317 } else {
3318 if (dpl < cpl || dpl < rpl)
5516d670
FB
3319 goto fail;
3320 if (!(e2 & DESC_W_MASK)) {
3321 fail:
3322 CC_SRC = eflags & ~CC_Z;
3ab493de 3323 return;
5516d670 3324 }
3ab493de 3325 }
5516d670 3326 CC_SRC = eflags | CC_Z;
3ab493de
FB
3327}
3328
19e6c4b8 3329/* x87 FPU helpers */
2c0262af 3330
/* Record an x87 exception in FPUS; if the exception is unmasked in the
   control word, also flag the summary/busy bits so it gets delivered. */
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}
3337
/* x87 division with zero-divide exception reporting.
   NOTE(review): flags ZE for any b == 0.0, but real hardware raises IE
   (invalid), not ZE, for 0/0 - confirm whether this matters here. */
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}
3344
/* Deliver a pending x87 exception: #MF when CR0.NE is set, otherwise
   via the legacy FERR external interrupt line (system emulation only). */
void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3356
19e6c4b8
FB
/* Load a 32-bit float memory operand into FT0 (bit pattern reinterpreted
   through a union to avoid aliasing issues). */
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}
3366
/* Load a 64-bit double memory operand into FT0. */
void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}
3376
/* Load a 32-bit signed integer operand into FT0. */
void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}
3381
/* FLD m32: push a 32-bit float onto the x87 register stack. */
void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3395
/* FLD m64: push a 64-bit double onto the x87 register stack. */
void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3409
/* FILD m32: push a 32-bit signed integer onto the x87 stack. */
void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3418
/* FILD m64: push a 64-bit signed integer onto the x87 stack. */
void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3427
/* FST m32: return ST0 converted to a 32-bit float bit pattern. */
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}
3437
/* FST m64: return ST0 converted to a 64-bit double bit pattern. */
uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}
3447
/* FIST m16: convert ST0 to a 16-bit integer using the current rounding
   mode; out-of-range results become the x87 integer indefinite -32768. */
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}
3456
/* FIST m32: convert ST0 to a 32-bit integer (current rounding mode). */
int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}
3463
/* FIST m64: convert ST0 to a 64-bit integer (current rounding mode). */
int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}
3470
/* FISTTP m16 (SSE3): truncating convert; overflow yields -32768. */
int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}
3479
/* FISTTP m32 (SSE3): truncating convert of ST0 to 32-bit integer. */
int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}
3486
/* FISTTP m64 (SSE3): truncating convert of ST0 to 64-bit integer. */
int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}
3493
/* FLD m80: push an 80-bit extended real from memory onto the stack. */
void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}
3502
/* FSTP m80: store ST0 to memory as an 80-bit extended real. */
void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
3507
/* Push an (uninitialised) entry onto the x87 register stack. */
void helper_fpush(void)
{
    fpush();
}
3512
/* Pop the top entry of the x87 register stack. */
void helper_fpop(void)
{
    fpop();
}
3517
/* FDECSTP: rotate the stack top pointer down; clears C0-C3. */
void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}
3523
/* FINCSTP: rotate the stack top pointer up; clears C0-C3. */
void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}
3529
3530/* FPU move */
3531
/* FFREE ST(i): mark the register's tag as empty. */
void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}
3536
/* Copy the scratch operand FT0 into ST0. */
void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}
3541
/* Copy ST(i) into the scratch operand FT0. */
void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}
3546
/* Copy ST(i) into ST0. */
void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}
3551
/* Copy ST0 into ST(i). */
void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}
3556
/* FXCH: exchange ST0 with ST(i). */
void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}
3564
3565/* FPU operations */
3566
3567static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3568
/* FCOM: compare ST0 with FT0 and set C0/C2/C3 (signalling on NaN). */
void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    /* ret is -1/0/1/2 (less/equal/greater/unordered); map to C-flags */
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
    FORCE_RET();
}
3577
/* FUCOM: like FCOM but quiet NaNs do not signal. */
void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
    FORCE_RET();
}
3586
3587static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3588
3589void helper_fcomi_ST0_FT0(void)
3590{
3591 int eflags;
3592 int ret;
3593
3594 ret = floatx_compare(ST0, FT0, &env->fp_status);
3595 eflags = cc_table[CC_OP].compute_all();
3596 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3597 CC_SRC = eflags;
3598 FORCE_RET();
3599}
3600
3601void helper_fucomi_ST0_FT0(void)
3602{
3603 int eflags;
3604 int ret;
3605
3606 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3607 eflags = cc_table[CC_OP].compute_all();
3608 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3609 CC_SRC = eflags;
3610 FORCE_RET();
3611}
3612
/* FADD: ST0 += FT0. */
void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}
3617
/* FMUL: ST0 *= FT0. */
void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}
3622
/* FSUB: ST0 -= FT0. */
void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}
3627
/* FSUBR: ST0 = FT0 - ST0 (reverse subtract). */
void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}
3632
/* FDIV: ST0 /= FT0, with zero-divide exception reporting. */
void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}
3637
/* FDIVR: ST0 = FT0 / ST0 (reverse divide). */
void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
3642
3643/* fp operations between STN and ST0 */
3644
/* FADD ST(i), ST0. */
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}
3649
/* FMUL ST(i), ST0. */
void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}
3654
/* FSUB ST(i), ST0. */
void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}
3659
/* FSUBR ST(i), ST0: ST(i) = ST0 - ST(i). */
void helper_fsubr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = ST0 - *p;
}
3666
/* FDIV ST(i), ST0: ST(i) /= ST0. */
void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}
3673
/* FDIVR ST(i), ST0: ST(i) = ST0 / ST(i). */
void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
3680
3681/* misc FPU operations */
/* FCHS: negate ST0. */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}
3686
/* FABS: absolute value of ST0. */
void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}
3691
/* FLD1: load the constant +1.0 (f15rk table). */
void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}
3696
/* FLDL2T: load log2(10). */
void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}
3701
/* FLDL2E: load log2(e). */
void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}
3706
/* FLDPI: load pi. */
void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}
3711
/* FLDLG2: load log10(2). */
void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}
3716
/* FLDLN2: load ln(2). */
void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}
3721
/* FLDZ: load +0.0. */
void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}
3726
/* Load +0.0 into the scratch operand FT0. */
void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}
3731
/* FNSTSW: status word with the TOP field refreshed from fpstt. */
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
3736
/* FNSTCW: return the x87 control word. */
uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
3741
b5b38f61
FB
/* Propagate the rounding-control and precision-control fields of the
   x87 control word into the softfloat status. */
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    /* precision control: 00=single, 10=double, 11=extended */
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
3779
19e6c4b8
FB
/* FLDCW: load the control word and re-derive softfloat settings. */
void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}
3785
/* FNCLEX: clear exception flags, busy and TOP bits of the status word. */
void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}
3790
/* FWAIT: deliver a pending x87 exception if the summary bit is set. */
void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
    FORCE_RET();
}
3797
3798void helper_fninit(void)
3799{
3800 env->fpus = 0;
3801 env->fpstt = 0;
3802 env->fpuc = 0x37f;
3803 env->fptags[0] = 1;
3804 env->fptags[1] = 1;
3805 env->fptags[2] = 1;
3806 env->fptags[3] = 1;
3807 env->fptags[4] = 1;
3808 env->fptags[5] = 1;
3809 env->fptags[6] = 1;
3810 env->fptags[7] = 1;
3811}
3812
2c0262af
FB
3813/* BCD ops */
3814
/* FBLD: load an 18-digit packed-BCD value (bytes 0-8 = digit pairs,
   byte 9 bit 7 = sign) from memory and push it onto the x87 stack. */
void helper_fbld_ST0(target_ulong ptr)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    /* accumulate digit pairs, most significant byte first */
    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub(ptr + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}
3833
19e6c4b8 3834void helper_fbst_ST0(target_ulong ptr)
2c0262af 3835{
2c0262af 3836 int v;
14ce26e7 3837 target_ulong mem_ref, mem_end;
2c0262af
FB
3838 int64_t val;
3839
7a0e1f41 3840 val = floatx_to_int64(ST0, &env->fp_status);
19e6c4b8 3841 mem_ref = ptr;
2c0262af
FB
3842 mem_end = mem_ref + 9;
3843 if (val < 0) {
3844 stb(mem_end, 0x80);
3845 val = -val;
3846 } else {
3847 stb(mem_end, 0x00);
3848 }
3849 while (mem_ref < mem_end) {
3850 if (val == 0)
3851 break;
3852 v = val % 100;
3853 val = val / 100;
3854 v = ((v / 10) << 4) | (v % 10);
3855 stb(mem_ref++, v);
3856 }
3857 while (mem_ref < mem_end) {
3858 stb(mem_ref++, 0);
3859 }
3860}
3861
/* F2XM1: ST0 = 2^ST0 - 1 (computed via host pow()). */
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}
3866
/* FYL2X: ST1 = ST1 * log2(ST0), then pop.  For non-positive ST0 the
   condition codes are cleared and C2 set instead of computing. */
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
3881
/* FPTAN: replace ST0 with tan(ST0) and push 1.0.  Arguments outside
   +/-MAXTAN just set C2 (operand out of range). */
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}
3897
/* FPATAN: ST1 = atan2(ST1, ST0), then pop. */
void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}
3907
/* FXTRACT: split ST0 into exponent (replaces ST0) and pushed
   significand (exponent rebiased to 0 via BIASEXPONENT). */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
3921
/* FPREM1: IEEE partial remainder ST0 = ST0 REM ST1 (round-to-nearest
   quotient).  For large exponent differences only a partial reduction
   is done and C2 is set so the caller re-executes the instruction;
   otherwise the low quotient bits are reported in C0/C3/C1. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* partial reduction: scale down, chop, and subtract */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
3975
/* FPREM: legacy partial remainder ST0 = ST0 % ST1 with the quotient
   truncated towards zero (unlike FPREM1's round-to-nearest).  Sets C2
   and leaves a partial result when the exponent gap is too large. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4030
/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), then pop; out-of-domain
   arguments set C2 instead. */
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
4045
/* FSQRT: ST0 = sqrt(ST0); negative input flags C2 (still stores the
   host sqrt result, i.e. a NaN). */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}
4057
/* FSINCOS: ST0 = sin, push cos; huge arguments only set C2. */
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
4073
/* FRNDINT: round ST0 to an integer using the current rounding mode. */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}
4078
/* FSCALE: ST0 = ST0 * 2^trunc(ST1) (the (int) cast chops ST1). */
void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}
4083
/* FSIN: ST0 = sin(ST0); huge arguments only set C2. */
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}
4097
/* FCOS: ST0 = cos(ST0); huge arguments only set C2. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg5 < 2**63 only */
    }
}
4111
/* FXAM: classify ST0 into C3..C0 - sign in C1, then NaN/infinity/
   zero/denormal/normal encodings per the x87 spec. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* extended reals carry an explicit integer bit in the mantissa */
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
4143
/* FSTENV: write the x87 environment (control/status/tag words plus
   zeroed instruction/operand pointers) in 32-bit or 16-bit layout.
   The 2-bit-per-register tag word is recomputed from the register
   contents: 0=valid, 1=zero, 2=special (NaN/inf/denormal), 3=empty. */
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
4193
/* FLDENV: reload control/status/tag words from a 32-bit or 16-bit
   environment image; TOP is extracted from the status word and each
   2-bit tag collapses to the internal empty/non-empty flag. */
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
4215
14ce26e7 4216void helper_fsave(target_ulong ptr, int data32)
2c0262af
FB
4217{
4218 CPU86_LDouble tmp;
4219 int i;
4220
4221 helper_fstenv(ptr, data32);
4222
4223 ptr += (14 << data32);
4224 for(i = 0;i < 8; i++) {
4225 tmp = ST(i);
2c0262af 4226 helper_fstt(tmp, ptr);
2c0262af
FB
4227 ptr += 10;
4228 }
4229
4230 /* fninit */
4231 env->fpus = 0;
4232 env->fpstt = 0;
4233 env->fpuc = 0x37f;
4234 env->fptags[0] = 1;
4235 env->fptags[1] = 1;
4236 env->fptags[2] = 1;
4237 env->fptags[3] = 1;
4238 env->fptags[4] = 1;
4239 env->fptags[5] = 1;
4240 env->fptags[6] = 1;
4241 env->fptags[7] = 1;
4242}
4243
/* FRSTOR: reload the environment and then the eight 80-bit registers
   written by a previous FSAVE. */
void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
4258
14ce26e7
FB
/* FXSAVE: store x87 + SSE state in the 512-byte FXSAVE image.  The tag
   word is stored in abridged one-bit-per-register form (inverted, so
   1 = valid); XMM registers are written only when CR4.OSFXSR is set. */
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff); /* abridged tag: 1 = register valid */
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    /* x87 registers, 16 bytes apart starting at offset 0x20 */
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            stq(addr, env->xmm_regs[i].XMM_Q(0));
            stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
            addr += 16;
        }
    }
}
4309
/* FXRSTOR: reload x87 + SSE state from a 512-byte FXSAVE image
   (mirror of helper_fxsave, including the inverted abridged tags). */
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff; /* undo the abridged-tag inversion */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        for(i = 0; i < nb_xmm_regs; i++) {
            env->xmm_regs[i].XMM_Q(0) = ldq(addr);
            env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
            addr += 16;
        }
    }
}
1f1af9fd
FB
4349
4350#ifndef USE_X86LDOUBLE
4351
4352void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4353{
4354 CPU86_LDoubleU temp;
4355 int e;
4356
4357 temp.d = f;
4358 /* mantissa */
4359 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4360 /* exponent + sign */
4361 e = EXPD(temp) - EXPBIAS + 16383;
4362 e |= SIGND(temp) >> 16;
4363 *pexp = e;
4364}
4365
4366CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4367{
4368 CPU86_LDoubleU temp;
4369 int e;
4370 uint64_t ll;
4371
4372 /* XXX: handle overflow ? */
4373 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4374 e |= (upper >> 4) & 0x800; /* sign */
4375 ll = (mant >> 11) & ((1LL << 52) - 1);
4376#ifdef __arm__
4377 temp.l.upper = (e << 20) | (ll >> 32);
4378 temp.l.lower = ll;
4379#else
4380 temp.ll = ll | ((uint64_t)e << 52);
4381#endif
4382 return temp.d;
4383}
4384
4385#else
4386
4387void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4388{
4389 CPU86_LDoubleU temp;
4390
4391 temp.d = f;
4392 *pmant = temp.l.lower;
4393 *pexp = temp.l.upper;
4394}
4395
4396CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4397{
4398 CPU86_LDoubleU temp;
4399
4400 temp.l.upper = upper;
4401 temp.l.lower = mant;
4402 return temp.d;
4403}
4404#endif
4405
14ce26e7
FB
4406#ifdef TARGET_X86_64
4407
4408//#define DEBUG_MULDIV
4409
/* 128-bit add: {*phigh:*plow} += {b:a}, with carry propagation from
   the low into the high word. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t sum = *plow + a;

    /* unsigned wrap-around on the low word means a carry out */
    if (sum < a)
        (*phigh)++;
    *plow = sum;
    *phigh += b;
}
4418
/* Two's-complement negation of the 128-bit value {*phigh:*plow}:
   ~x + 1, done without a helper call. */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    /* the +1 carries into the high word only when the low word is 0 */
    uint64_t carry = (*plow == 0);

    *plow = ~*plow + 1;
    *phigh = ~*phigh + carry;
}
4425
45bbbb46
FB
4426/* return TRUE if overflow */
/* Unsigned 128/64 division: {*phigh:*plow} / b.
   On return *plow holds the quotient and *phigh the remainder.
   Returns TRUE (1) when the quotient does not fit in 64 bits. */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t lo, hi;
    int bit, qbit, topbit;

    lo = *plow;
    hi = *phigh;
    if (hi == 0) {
        /* fits in a native 64-bit division */
        *plow = lo / b;
        *phigh = lo % b;
    } else {
        if (hi >= b)
            return 1;   /* quotient would need more than 64 bits */
        /* XXX: use a better algorithm */
        /* classic shift-and-subtract restoring division, one quotient
           bit per iteration */
        for (bit = 0; bit < 64; bit++) {
            topbit = hi >> 63;
            hi = (hi << 1) | (lo >> 63);
            if (topbit || hi >= b) {
                hi -= b;
                qbit = 1;
            } else {
                qbit = 0;
            }
            lo = (lo << 1) | qbit;
        }
#if defined(DEBUG_MULDIV)
        printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
               *phigh, *plow, b, lo, hi);
#endif
        *plow = lo;     /* quotient */
        *phigh = hi;    /* remainder */
    }
    return 0;
}
4463
45bbbb46
FB
4464/* return TRUE if overflow */
/* Signed 128/64 division via the unsigned routine on magnitudes.
   On return *plow holds the quotient and *phigh the remainder (which
   takes the dividend's sign). Returns TRUE (1) on overflow. */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int neg_num, neg_den;

    neg_num = ((int64_t)*phigh < 0);
    if (neg_num)
        neg128(plow, phigh);
    neg_den = (b < 0);
    if (neg_den)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (neg_num ^ neg_den) {
        /* negative quotient: magnitude may reach 2^63 (== INT64_MIN) */
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        /* positive quotient must stay below 2^63 */
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (neg_num)
        *phigh = - *phigh;
    return 0;
}
4488
b8b6a50b 4489void helper_mulq_EAX_T0(target_ulong t0)
14ce26e7
FB
4490{
4491 uint64_t r0, r1;
4492
b8b6a50b 4493 mulu64(&r0, &r1, EAX, t0);
14ce26e7
FB
4494 EAX = r0;
4495 EDX = r1;
4496 CC_DST = r0;
4497 CC_SRC = r1;
4498}
4499
b8b6a50b 4500void helper_imulq_EAX_T0(target_ulong t0)
14ce26e7
FB
4501{
4502 uint64_t r0, r1;
4503
b8b6a50b 4504 muls64(&r0, &r1, EAX, t0);
14ce26e7
FB
4505 EAX = r0;
4506 EDX = r1;
4507 CC_DST = r0;
a8ede8ba 4508 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
14ce26e7
FB
4509}
4510
b8b6a50b 4511target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
14ce26e7
FB
4512{
4513 uint64_t r0, r1;
4514
b8b6a50b 4515 muls64(&r0, &r1, t0, t1);
14ce26e7
FB
4516 CC_DST = r0;
4517 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
b8b6a50b 4518 return r0;
14ce26e7
FB
4519}
4520
b5b38f61 4521void helper_divq_EAX(target_ulong t0)
14ce26e7
FB
4522{
4523 uint64_t r0, r1;
b5b38f61 4524 if (t0 == 0) {
14ce26e7
FB
4525 raise_exception(EXCP00_DIVZ);
4526 }
4527 r0 = EAX;
4528 r1 = EDX;
b5b38f61 4529 if (div64(&r0, &r1, t0))
45bbbb46 4530 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
4531 EAX = r0;
4532 EDX = r1;
4533}
4534
b5b38f61 4535void helper_idivq_EAX(target_ulong t0)
14ce26e7
FB
4536{
4537 uint64_t r0, r1;
b5b38f61 4538 if (t0 == 0) {
14ce26e7
FB
4539 raise_exception(EXCP00_DIVZ);
4540 }
4541 r0 = EAX;
4542 r1 = EDX;
b5b38f61 4543 if (idiv64(&r0, &r1, t0))
45bbbb46 4544 raise_exception(EXCP00_DIVZ);
14ce26e7
FB
4545 EAX = r0;
4546 EDX = r1;
4547}
14ce26e7
FB
4548#endif
4549
3d7374c5
FB
4550void helper_hlt(void)
4551{
4552 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4553 env->hflags |= HF_HALTED_MASK;
4554 env->exception_index = EXCP_HLT;
4555 cpu_loop_exit();
4556}
4557
b5b38f61 4558void helper_monitor(target_ulong ptr)
3d7374c5 4559{
d80c7d1c 4560 if ((uint32_t)ECX != 0)
3d7374c5
FB
4561 raise_exception(EXCP0D_GPF);
4562 /* XXX: store address ? */
4563}
4564
4565void helper_mwait(void)
4566{
d80c7d1c 4567 if ((uint32_t)ECX != 0)
3d7374c5
FB
4568 raise_exception(EXCP0D_GPF);
4569 /* XXX: not complete but not completely erroneous */
4570 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4571 /* more than one CPU: do not sleep because another CPU may
4572 wake this one */
4573 } else {
4574 helper_hlt();
4575 }
4576}
4577
b5b38f61 4578void helper_debug(void)
664e0f19 4579{
b5b38f61
FB
4580 env->exception_index = EXCP_DEBUG;
4581 cpu_loop_exit();
664e0f19
FB
4582}
4583
/* Raise a software interrupt (is_int = 1, no error code);
   next_eip_addend is the length of the raising instruction. */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
4588
/* Thin wrapper so generated code can raise a CPU exception. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4d6b6c0a 4593
b5b38f61
FB
4594void helper_cli(void)
4595{
4596 env->eflags &= ~IF_MASK;
4597}
4598
4599void helper_sti(void)
4600{
4601 env->eflags |= IF_MASK;
4602}
4603
4604#if 0
4605/* vm86plus instructions */
4606void helper_cli_vm(void)
4607{
4608 env->eflags &= ~VIF_MASK;
4609}
4610
4611void helper_sti_vm(void)
4612{
4613 env->eflags |= VIF_MASK;
4614 if (env->eflags & VIP_MASK) {
4615 raise_exception(EXCP0D_GPF);
7a0e1f41 4616 }
b5b38f61 4617}
4d6b6c0a 4618#endif
b5b38f61
FB
4619
4620void helper_set_inhibit_irq(void)
4621{
4622 env->hflags |= HF_INHIBIT_IRQ_MASK;
4623}
4624
4625void helper_reset_inhibit_irq(void)
4626{
4627 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4628}
4629
b8b6a50b 4630void helper_boundw(target_ulong a0, int v)
b5b38f61 4631{
b8b6a50b
FB
4632 int low, high;
4633 low = ldsw(a0);
4634 high = ldsw(a0 + 2);
4635 v = (int16_t)v;
b5b38f61
FB
4636 if (v < low || v > high) {
4637 raise_exception(EXCP05_BOUND);
4638 }
4639 FORCE_RET();
4640}
4641
b8b6a50b 4642void helper_boundl(target_ulong a0, int v)
b5b38f61 4643{
b8b6a50b
FB
4644 int low, high;
4645 low = ldl(a0);
4646 high = ldl(a0 + 4);
b5b38f61
FB
4647 if (v < low || v > high) {
4648 raise_exception(EXCP05_BOUND);
4649 }
4650 FORCE_RET();
4651}
4652
4653static float approx_rsqrt(float a)
4654{
4655 return 1.0 / sqrt(a);
4656}
4657
/* RCPSS/RCPPS approximation: computed in double precision and
   truncated to float on return. */
static float approx_rcp(float a)
{
    return 1.0 / a;
}
664e0f19 4662
5fafdf24 4663#if !defined(CONFIG_USER_ONLY)
61382a50
FB
4664
4665#define MMUSUFFIX _mmu
61382a50 4666
2c0262af
FB
4667#define SHIFT 0
4668#include "softmmu_template.h"
4669
4670#define SHIFT 1
4671#include "softmmu_template.h"
4672
4673#define SHIFT 2
4674#include "softmmu_template.h"
4675
4676#define SHIFT 3
4677#include "softmmu_template.h"
4678
61382a50
FB
4679#endif
4680
4681/* try to fill the TLB and return an exception if error. If retaddr is
4682 NULL, it means that the function was called in C code (i.e. not
4683 from generated code or from helper.c) */
4684/* XXX: fix it to restore all registers */
6ebbf390 4685void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2c0262af
FB
4686{
4687 TranslationBlock *tb;
4688 int ret;
4689 unsigned long pc;
61382a50
FB
4690 CPUX86State *saved_env;
4691
4692 /* XXX: hack to restore env in all cases, even if not called from
4693 generated code */
4694 saved_env = env;
4695 env = cpu_single_env;
61382a50 4696
6ebbf390 4697 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2c0262af 4698 if (ret) {
61382a50
FB
4699 if (retaddr) {
4700 /* now we have a real cpu fault */
4701 pc = (unsigned long)retaddr;
4702 tb = tb_find_pc(pc);
4703 if (tb) {
4704 /* the PC is inside the translated code. It means that we have
4705 a virtual CPU fault */
58fe2f10 4706 cpu_restore_state(tb, env, pc, NULL);
61382a50 4707 }
2c0262af 4708 }
0d1a29f9 4709 if (retaddr)
54ca9095 4710 raise_exception_err(env->exception_index, env->error_code);
0d1a29f9 4711 else
54ca9095 4712 raise_exception_err_norestore(env->exception_index, env->error_code);
2c0262af 4713 }
61382a50 4714 env = saved_env;
2c0262af 4715}
0573fbfc
TS
4716
4717
4718/* Secure Virtual Machine helpers */
4719
4720void helper_stgi(void)
4721{
4722 env->hflags |= HF_GIF_MASK;
4723}
4724
4725void helper_clgi(void)
4726{
4727 env->hflags &= ~HF_GIF_MASK;
4728}
4729
4730#if defined(CONFIG_USER_ONLY)
4731
b8b6a50b
FB
/* In user-mode emulation the SVM instructions are privileged no-ops. */
void helper_vmrun(void)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(void)
{
}
void helper_vmsave(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(void)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
0573fbfc
TS
4761#else
4762
/* Expand the packed 16-bit VMCB segment attribute field (plus the base
   and limit pieces that live in the hidden descriptor bytes) into the
   CPU's 32-bit descriptor attribute word. */
static inline uint32_t
vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
{
    uint32_t attrib;

    attrib  = (vmcb_attrib & 0x00ff) << 8;   /* Type, S, DPL, P */
    attrib |= (vmcb_attrib & 0x0f00) << 12;  /* AVL, L, DB, G */
    attrib |= (vmcb_base >> 16) & 0xff;      /* Base 23-16 */
    attrib |= vmcb_base & 0xff000000;        /* Base 31-24 */
    attrib |= vmcb_limit & 0xf0000;          /* Limit 19-16 */
    return attrib;
}
4772
4773static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
4774{
4775 return ((cpu_attrib >> 8) & 0xff) /* Type, S, DPL, P */
4776 | ((cpu_attrib & 0xf00000) >> 12); /* AVL, L, DB, G */
4777}
4778
/* VMRUN: enter the guest whose VMCB physical address is in rAX.
   Saves the host state to the hsave page, loads guest state and
   intercept bitmaps from the VMCB, optionally injects a pending event,
   then restarts the execution loop inside the guest. */
void helper_vmrun(void)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    /* We shift all the intercept bits so we can OR them with the TB
       flags later on */
    env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* virtual interrupt masking: guest TPR lives in the VMCB */
        env->cr[8] = int_ctl & V_TPR_MASK;
        cpu_set_apic_tpr(env, env->cr[8]);
        if (env->eflags & IF_MASK)
            env->hflags |= HF_HIF_MASK;
    }

#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
    CC_DST = 0xffffffff;

    SVM_LOAD_SEG(env->vm_vmcb, ES, es);
    SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
    SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
    SVM_LOAD_SEG(env->vm_vmcb, DS, ds);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    helper_stgi();

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);

        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "INTR");
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "NMI");
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "EXEPT");
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                if (loglevel & CPU_LOG_TB_IN_ASM)
                    fprintf(logfile, "SOFT");
                break;
        }
        if (loglevel & CPU_LOG_TB_IN_ASM)
            fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
    }
    if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    cpu_loop_exit();
}
4942
4943void helper_vmmcall(void)
4944{
4945 if (loglevel & CPU_LOG_TB_IN_ASM)
4946 fprintf(logfile,"vmmcall!\n");
4947}
4948
/* VMLOAD: load the additional (non-vmrun) guest state — FS/GS, TR,
   LDTR and the syscall/sysenter MSRs — from the VMCB at rAX. */
void helper_vmload(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_LOAD_SEG2(addr, segs[R_FS], fs);
    SVM_LOAD_SEG2(addr, segs[R_GS], gs);
    SVM_LOAD_SEG2(addr, tr, tr);
    SVM_LOAD_SEG2(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
4974
/* VMSAVE: store the additional (non-vmrun) guest state — FS/GS, TR,
   LDTR and the syscall/sysenter MSRs — to the VMCB at rAX.
   Mirror of helper_vmload. */
void helper_vmsave(void)
{
    target_ulong addr;
    addr = EAX;
    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    SVM_SAVE_SEG(addr, segs[R_FS], fs);
    SVM_SAVE_SEG(addr, segs[R_GS], gs);
    SVM_SAVE_SEG(addr, tr, tr);
    SVM_SAVE_SEG(addr, ldt, ldtr);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
5000
5001void helper_skinit(void)
5002{
5003 if (loglevel & CPU_LOG_TB_IN_ASM)
5004 fprintf(logfile,"skinit!\n");
5005}
5006
5007void helper_invlpga(void)
5008{
5009 tlb_flush(env, 0);
5010}
5011
b8b6a50b 5012void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
0573fbfc
TS
5013{
5014 switch(type) {
5015 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5016 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
b8b6a50b 5017 helper_vmexit(type, param);
0573fbfc
TS
5018 }
5019 break;
5020 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
5021 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
b8b6a50b 5022 helper_vmexit(type, param);
0573fbfc
TS
5023 }
5024 break;
5025 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5026 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
b8b6a50b 5027 helper_vmexit(type, param);
0573fbfc
TS
5028 }
5029 break;
5030 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
5031 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
b8b6a50b 5032 helper_vmexit(type, param);
0573fbfc
TS
5033 }
5034 break;
5035 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
5036 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
b8b6a50b 5037 helper_vmexit(type, param);
0573fbfc
TS
5038 }
5039 break;
5040 case SVM_EXIT_IOIO:
0573fbfc
TS
5041 break;
5042
5043 case SVM_EXIT_MSR:
5044 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
5045 /* FIXME: this should be read in at vmrun (faster this way?) */
5046 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
b8b6a50b 5047 uint32_t t0, t1;
0573fbfc
TS
5048 switch((uint32_t)ECX) {
5049 case 0 ... 0x1fff:
b8b6a50b
FB
5050 t0 = (ECX * 2) % 8;
5051 t1 = ECX / 8;
0573fbfc
TS
5052 break;
5053 case 0xc0000000 ... 0xc0001fff:
b8b6a50b
FB
5054 t0 = (8192 + ECX - 0xc0000000) * 2;
5055 t1 = (t0 / 8);
5056 t0 %= 8;
0573fbfc
TS
5057 break;
5058 case 0xc0010000 ... 0xc0011fff:
b8b6a50b
FB
5059 t0 = (16384 + ECX - 0xc0010000) * 2;
5060 t1 = (t0 / 8);
5061 t0 %= 8;
0573fbfc
TS
5062 break;
5063 default:
b8b6a50b
FB
5064 helper_vmexit(type, param);
5065 t0 = 0;
5066 t1 = 0;
5067 break;
0573fbfc 5068 }
b8b6a50b
FB
5069 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5070 helper_vmexit(type, param);
0573fbfc
TS
5071 }
5072 break;
5073 default:
5074 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
b8b6a50b 5075 helper_vmexit(type, param);
0573fbfc
TS
5076 }
5077 break;
5078 }
0573fbfc
TS
5079}
5080
b8b6a50b
FB
/* Check the VMCB I/O permission bitmap for an access to `port` and
   perform a #VMEXIT(SVM_EXIT_IOIO) when it is intercepted.
   param encodes the IOIO exit information (access size in bits 4-6);
   next_eip_addend added to EIP gives the instruction following the
   IN/OUT, stored in exit_info_2 as the resume address. */
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        /* one permission bit per port; a multi-byte access tests the
           bits of all covered ports at once */
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
5096
5097/* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: leave guest mode. Saves the guest state and the exit reason
   into the VMCB, reloads the host state from the hsave page, clears GIF
   and restarts the execution loop in the host.
   Note: currently only 32 bits of exit_code are used. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    if (loglevel & CPU_LOG_TB_IN_ASM)
        fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* record (and consume) the guest's interrupt shadow */
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
    SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* with virtual interrupt masking, write the guest TPR back into
       the VMCB's V_TPR field */
    if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
        int_ctl &= ~V_TPR_MASK;
        int_ctl |= env->cr[8] & V_TPR_MASK;
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
    }

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags &= ~HF_HIF_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
        cpu_set_apic_tpr(env, env->cr[8]);
    }
    /* we need to set the efer after the crs so the hidden flags get set properly */
#ifdef TARGET_X86_64
    env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
    env->hflags &= ~HF_LMA_MASK;
    if (env->efer & MSR_EFER_LMA)
        env->hflags |= HF_LMA_MASK;
#endif

    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    SVM_LOAD_SEG(env->vm_hsave, ES, es);
    SVM_LOAD_SEG(env->vm_hsave, CS, cs);
    SVM_LOAD_SEG(env->vm_hsave, SS, ss);
    SVM_LOAD_SEG(env->vm_hsave, DS, ds);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    helper_clgi();
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
5226
5227#endif
5af45186
FB
5228
5229/* MMX/SSE */
5230/* XXX: optimize by storing fptt and fptags in the static cpu state */
5231void helper_enter_mmx(void)
5232{
5233 env->fpstt = 0;
5234 *(uint32_t *)(env->fptags) = 0;
5235 *(uint32_t *)(env->fptags + 4) = 0;
5236}
5237
5238void helper_emms(void)
5239{
5240 /* set to empty state */
5241 *(uint32_t *)(env->fptags) = 0x01010101;
5242 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5243}
5244
5245/* XXX: suppress */
5246void helper_movq(uint64_t *d, uint64_t *s)
5247{
5248 *d = *s;
5249}
5250
5251#define SHIFT 0
5252#include "ops_sse.h"
5253
5254#define SHIFT 1
5255#include "ops_sse.h"
5256
b6abf97d
FB
5257#define SHIFT 0
5258#include "helper_template.h"
5259#undef SHIFT
5260
5261#define SHIFT 1
5262#include "helper_template.h"
5263#undef SHIFT
5264
5265#define SHIFT 2
5266#include "helper_template.h"
5267#undef SHIFT
5268
5269#ifdef TARGET_X86_64
5270
5271#define SHIFT 3
5272#include "helper_template.h"
5273#undef SHIFT
5274
5275#endif
07d2c595 5276
6191b059
FB
5277/* bit operations */
5278target_ulong helper_bsf(target_ulong t0)
5279{
5280 int count;
5281 target_ulong res;
5282
5283 res = t0;
5284 count = 0;
5285 while ((res & 1) == 0) {
5286 count++;
5287 res >>= 1;
5288 }
5289 return count;
5290}
5291
5292target_ulong helper_bsr(target_ulong t0)
5293{
5294 int count;
5295 target_ulong res, mask;
5296
5297 res = t0;
5298 count = TARGET_LONG_BITS - 1;
5299 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5300 while ((res & mask) == 0) {
5301 count--;
5302 res <<= 1;
5303 }
5304 return count;
5305}
5306
5307
07d2c595
FB
/* CC_OP_EFLAGS mode: the condition-code bits are already materialized
   in CC_SRC, so return them unchanged. */
static int compute_all_eflags(void)
{
    return CC_SRC;
}
5312
/* CC_OP_EFLAGS mode: extract only the carry flag from the
   materialized flags in CC_SRC. */
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
5317
/* Lazy condition-code dispatch table, indexed by the current CC_OP_*
   mode.  Each entry pairs a compute_all_* function (rebuild every
   EFLAGS condition bit) with a compute_c_* function (carry flag only).
   The per-width helpers are instantiated from helper_template.h above
   (SHIFT 0..3 = byte/word/long/quad).  NOTE(review): INC/DEC and SAR
   entries reuse the "l"-width carry helper for every operand width —
   presumably because the carry computation there is width-independent
   (INC/DEC leave CF unchanged); confirm in helper_template.h. */
CCTable cc_table[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = { /* should never happen */ },

    [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },

    [CC_OP_MULB] = { compute_all_mulb, compute_c_mull },
    [CC_OP_MULW] = { compute_all_mulw, compute_c_mull },
    [CC_OP_MULL] = { compute_all_mull, compute_c_mull },

    [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
    [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
    [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },

    [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
    [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
    [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },

    [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
    [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
    [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },

    [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
    [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
    [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },

    [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
    [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
    [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },

    [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
    [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
    [CC_OP_INCL] = { compute_all_incl, compute_c_incl },

    [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
    [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
    [CC_OP_DECL] = { compute_all_decl, compute_c_incl },

    [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
    [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
    [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },

    [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
    [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
    [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },

#ifdef TARGET_X86_64
    [CC_OP_MULQ] = { compute_all_mulq, compute_c_mull },

    [CC_OP_ADDQ] = { compute_all_addq, compute_c_addq },

    [CC_OP_ADCQ] = { compute_all_adcq, compute_c_adcq },

    [CC_OP_SUBQ] = { compute_all_subq, compute_c_subq },

    [CC_OP_SBBQ] = { compute_all_sbbq, compute_c_sbbq },

    [CC_OP_LOGICQ] = { compute_all_logicq, compute_c_logicq },

    [CC_OP_INCQ] = { compute_all_incq, compute_c_incl },

    [CC_OP_DECQ] = { compute_all_decq, compute_c_incl },

    [CC_OP_SHLQ] = { compute_all_shlq, compute_c_shlq },

    [CC_OP_SARQ] = { compute_all_sarq, compute_c_sarl },
#endif
};
5385