/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <math.h>
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"
#include "ioport.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
        log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

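/* PF flag lookup table: entry i has CC_P set when byte value i contains
   an even number of set bits */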
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

#if defined(CONFIG_SOFTFLOAT)
# define floatx_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
# define floatx_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
# define floatx_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
#else
# define floatx_lg2 (0.30102999566398119523L)
# define floatx_l2e (1.44269504088896340739L)
# define floatx_l2t (3.32192809488736234781L)
#endif

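/* table of x87 constants: 0.0, 1.0, pi, log10(2), ln(2), log2(e) and
   log2(10), in the order used by the FPU constant-load operations */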
static const CPU86_LDouble f15rk[7] =
{
    floatx_zero,
    floatx_one,
    floatx_pi,
    floatx_lg2,
    floatx_ln2,
    floatx_l2e,
    floatx_l2t,
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

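/* Fetch the two descriptor words for `selector` from the GDT or LDT:
   e1 receives the low 32 bits, e2 the high 32 bits. */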
/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

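/* Read the inner-level stack pointer (SS:ESP) for privilege level `dpl`
   from the current TSS; the entry layout depends on whether the TSS is
   a 16 or 32 bit one. */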
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

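/* Perform a hardware task switch to the TSS designated by tss_selector.
   `source` (one of the SWITCH_TSS_* values above) selects how the busy
   bit of the old/new TSS descriptor and the NT flag are updated. */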
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

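/* Return non-zero if exception `intno` pushes an error code, i.e. for
   #DF, #TS, #NP, #SS, #GP, #PF and #AC. */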
static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

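/* Read a stack pointer from the 64-bit TSS: levels 0-2 select RSP0-RSP2;
   callers pass ist + 3 to select the IST1-IST7 entries. */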
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

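/* SYSCALL: the new CS selector comes from bits 47:32 of MSR_STAR and SS
   is CS + 8; in long mode, RIP is loaded from LSTAR or CSTAR and RFLAGS
   is masked with SFMASK. */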
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
static void handle_event_inj(int intno, int is_int, int error_code,
                             int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

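/* Enter System Management Mode: save the CPU state to the SMRAM
   state-save area at smbase + 0x8000 and reset to the SMM entry state
   (CS base at smbase, EIP 0x8000, protection and paging disabled). */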
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */

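/* Note: when the quotient does not fit in the destination register the
   hardware raises #DE, the same vector as divide by zero, hence
   EXCP00_DIVZ for both cases below. */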
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: base == 0 should raise the #DE exception */
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

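/* ASCII adjust after addition: if AF is set or the low nibble of AL is
   greater than 9, add 6 to AL (keeping only the low nibble) and
   propagate a carry into AH, setting AF and CF. */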
void helper_aaa(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al > 0xf9);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0x0f;
        ah = (ah + 1 + icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}

1828void helper_daa(void)
1829{
1830 int al, af, cf;
1831 int eflags;
1832
a7812ae4 1833 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1834 cf = eflags & CC_C;
1835 af = eflags & CC_A;
1836 al = EAX & 0xff;
1837
1838 eflags = 0;
1839 if (((al & 0x0f) > 9 ) || af) {
1840 al = (al + 6) & 0xff;
1841 eflags |= CC_A;
1842 }
1843 if ((al > 0x9f) || cf) {
1844 al = (al + 0x60) & 0xff;
1845 eflags |= CC_C;
1846 }
1847 EAX = (EAX & ~0xff) | al;
1848 /* well, speed is not an issue here, so we compute the flags by hand */
1849 eflags |= (al == 0) << 6; /* zf */
1850 eflags |= parity_table[al]; /* pf */
1851 eflags |= (al & 0x80); /* sf */
1852 CC_SRC = eflags;
1853}
1854
1855void helper_das(void)
1856{
1857 int al, al1, af, cf;
1858 int eflags;
1859
a7812ae4 1860 eflags = helper_cc_compute_all(CC_OP);
1861 cf = eflags & CC_C;
1862 af = eflags & CC_A;
1863 al = EAX & 0xff;
1864
1865 eflags = 0;
1866 al1 = al;
1867 if (((al & 0x0f) > 9) || af) {
1868 eflags |= CC_A;
1869 if (al < 6 || cf)
1870 eflags |= CC_C;
1871 al = (al - 6) & 0xff;
1872 }
1873 if ((al1 > 0x99) || cf) {
1874 al = (al - 0x60) & 0xff;
1875 eflags |= CC_C;
1876 }
1877 EAX = (EAX & ~0xff) | al;
1878 /* well, speed is not an issue here, so we compute the flags by hand */
1879 eflags |= (al == 0) << 6; /* zf */
1880 eflags |= parity_table[al]; /* pf */
1881 eflags |= (al & 0x80); /* sf */
1882 CC_SRC = eflags;
1883}
1884
1885void helper_into(int next_eip_addend)
1886{
1887 int eflags;
a7812ae4 1888 eflags = helper_cc_compute_all(CC_OP);
1889 if (eflags & CC_O) {
1890 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1891 }
1892}
1893
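/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match
   store ECX:EBX and set ZF, otherwise load the operand into EDX:EAX and
   clear ZF. The store in the mismatch path mirrors the unconditional
   write cycle performed by the real instruction. */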
1894void helper_cmpxchg8b(target_ulong a0)
1895{
1896 uint64_t d;
1897 int eflags;
1898
a7812ae4 1899 eflags = helper_cc_compute_all(CC_OP);
1900 d = ldq(a0);
1901 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1902 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1903 eflags |= CC_Z;
1904 } else {
1905 /* always do the store */
1906 stq(a0, d);
1907 EDX = (uint32_t)(d >> 32);
1908 EAX = (uint32_t)d;
1909 eflags &= ~CC_Z;
1910 }
1911 CC_SRC = eflags;
1912}
1913
1914#ifdef TARGET_X86_64
1915void helper_cmpxchg16b(target_ulong a0)
1916{
1917 uint64_t d0, d1;
1918 int eflags;
1919
1920 if ((a0 & 0xf) != 0)
1921 raise_exception(EXCP0D_GPF);
a7812ae4 1922 eflags = helper_cc_compute_all(CC_OP);
1923 d0 = ldq(a0);
1924 d1 = ldq(a0 + 8);
1925 if (d0 == EAX && d1 == EDX) {
1926 stq(a0, EBX);
1927 stq(a0 + 8, ECX);
1928 eflags |= CC_Z;
1929 } else {
1930 /* always do the store */
1931 stq(a0, d0);
1932 stq(a0 + 8, d1);
1933 EDX = d1;
1934 EAX = d0;
1935 eflags &= ~CC_Z;
1936 }
1937 CC_SRC = eflags;
1938}
1939#endif
1940
1941void helper_single_step(void)
1942{
1943#ifndef CONFIG_USER_ONLY
1944 check_hw_breakpoints(env, 1);
1945 env->dr[6] |= DR6_BS;
1946#endif
1947 raise_exception(EXCP01_DB);
1948}
1949
1950void helper_cpuid(void)
1951{
6fd805e1 1952 uint32_t eax, ebx, ecx, edx;
eaa728ee 1953
872929aa 1954 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
e737b32a 1955
e00b6f80 1956 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1957 EAX = eax;
1958 EBX = ebx;
1959 ECX = ecx;
1960 EDX = edx;
1961}
1962
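/* ENTER with a non-zero nesting level copies level-1 frame pointers from
   the old frame before pushing the new frame pointer (t1); the SS flags
   select 16- vs 32-bit stack wrap-around via esp_mask. */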
1963void helper_enter_level(int level, int data32, target_ulong t1)
1964{
1965 target_ulong ssp;
1966 uint32_t esp_mask, esp, ebp;
1967
1968 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1969 ssp = env->segs[R_SS].base;
1970 ebp = EBP;
1971 esp = ESP;
1972 if (data32) {
1973 /* 32 bit */
1974 esp -= 4;
1975 while (--level) {
1976 esp -= 4;
1977 ebp -= 4;
1978 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1979 }
1980 esp -= 4;
1981 stl(ssp + (esp & esp_mask), t1);
1982 } else {
1983 /* 16 bit */
1984 esp -= 2;
1985 while (--level) {
1986 esp -= 2;
1987 ebp -= 2;
1988 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1989 }
1990 esp -= 2;
1991 stw(ssp + (esp & esp_mask), t1);
1992 }
1993}
1994
1995#ifdef TARGET_X86_64
1996void helper_enter64_level(int level, int data64, target_ulong t1)
1997{
1998 target_ulong esp, ebp;
1999 ebp = EBP;
2000 esp = ESP;
2001
2002 if (data64) {
2003 /* 64 bit */
2004 esp -= 8;
2005 while (--level) {
2006 esp -= 8;
2007 ebp -= 8;
2008 stq(esp, ldq(ebp));
2009 }
2010 esp -= 8;
2011 stq(esp, t1);
2012 } else {
2013 /* 16 bit */
2014 esp -= 2;
2015 while (--level) {
2016 esp -= 2;
2017 ebp -= 2;
2018 stw(esp, lduw(ebp));
2019 }
2020 esp -= 2;
2021 stw(esp, t1);
2022 }
2023}
2024#endif
2025
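/* LLDT: the selector must reference the GDT (TI bit clear) and point at
   a present LDT descriptor (system type 2); in long mode the descriptor
   is 16 bytes and the third dword supplies base bits 63:32. */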
2026void helper_lldt(int selector)
2027{
2028 SegmentCache *dt;
2029 uint32_t e1, e2;
2030 int index, entry_limit;
2031 target_ulong ptr;
2032
2033 selector &= 0xffff;
2034 if ((selector & 0xfffc) == 0) {
2035 /* XXX: NULL selector case: invalid LDT */
2036 env->ldt.base = 0;
2037 env->ldt.limit = 0;
2038 } else {
2039 if (selector & 0x4)
2040 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2041 dt = &env->gdt;
2042 index = selector & ~7;
2043#ifdef TARGET_X86_64
2044 if (env->hflags & HF_LMA_MASK)
2045 entry_limit = 15;
2046 else
2047#endif
2048 entry_limit = 7;
2049 if ((index + entry_limit) > dt->limit)
2050 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2051 ptr = dt->base + index;
2052 e1 = ldl_kernel(ptr);
2053 e2 = ldl_kernel(ptr + 4);
2054 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2055 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2056 if (!(e2 & DESC_P_MASK))
2057 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2058#ifdef TARGET_X86_64
2059 if (env->hflags & HF_LMA_MASK) {
2060 uint32_t e3;
2061 e3 = ldl_kernel(ptr + 8);
2062 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2063 env->ldt.base |= (target_ulong)e3 << 32;
2064 } else
2065#endif
2066 {
2067 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2068 }
2069 }
2070 env->ldt.selector = selector;
2071}
2072
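/* LTR: like LLDT but for the task register; the descriptor must be an
   available TSS (type 1 or 9) and is marked busy in the GDT entry once
   loaded. */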
2073void helper_ltr(int selector)
2074{
2075 SegmentCache *dt;
2076 uint32_t e1, e2;
2077 int index, type, entry_limit;
2078 target_ulong ptr;
2079
2080 selector &= 0xffff;
2081 if ((selector & 0xfffc) == 0) {
2082 /* NULL selector case: invalid TR */
2083 env->tr.base = 0;
2084 env->tr.limit = 0;
2085 env->tr.flags = 0;
2086 } else {
2087 if (selector & 0x4)
2088 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2089 dt = &env->gdt;
2090 index = selector & ~7;
2091#ifdef TARGET_X86_64
2092 if (env->hflags & HF_LMA_MASK)
2093 entry_limit = 15;
2094 else
2095#endif
2096 entry_limit = 7;
2097 if ((index + entry_limit) > dt->limit)
2098 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2099 ptr = dt->base + index;
2100 e1 = ldl_kernel(ptr);
2101 e2 = ldl_kernel(ptr + 4);
2102 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2103 if ((e2 & DESC_S_MASK) ||
2104 (type != 1 && type != 9))
2105 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2106 if (!(e2 & DESC_P_MASK))
2107 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2108#ifdef TARGET_X86_64
2109 if (env->hflags & HF_LMA_MASK) {
2110 uint32_t e3, e4;
2111 e3 = ldl_kernel(ptr + 8);
2112 e4 = ldl_kernel(ptr + 12);
2113 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2114 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2115 load_seg_cache_raw_dt(&env->tr, e1, e2);
2116 env->tr.base |= (target_ulong)e3 << 32;
2117 } else
2118#endif
2119 {
2120 load_seg_cache_raw_dt(&env->tr, e1, e2);
2121 }
2122 e2 |= DESC_TSS_BUSY_MASK;
2123 stl_kernel(ptr + 4, e2);
2124 }
2125 env->tr.selector = selector;
2126}
2127
2128 /* only works in protected mode, outside VM86. seg_reg must be != R_CS */
2129void helper_load_seg(int seg_reg, int selector)
2130{
2131 uint32_t e1, e2;
2132 int cpl, dpl, rpl;
2133 SegmentCache *dt;
2134 int index;
2135 target_ulong ptr;
2136
2137 selector &= 0xffff;
2138 cpl = env->hflags & HF_CPL_MASK;
2139 if ((selector & 0xfffc) == 0) {
2140 /* null selector case */
2141 if (seg_reg == R_SS
2142#ifdef TARGET_X86_64
2143 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2144#endif
2145 )
2146 raise_exception_err(EXCP0D_GPF, 0);
2147 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2148 } else {
2149
2150 if (selector & 0x4)
2151 dt = &env->ldt;
2152 else
2153 dt = &env->gdt;
2154 index = selector & ~7;
2155 if ((index + 7) > dt->limit)
2156 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2157 ptr = dt->base + index;
2158 e1 = ldl_kernel(ptr);
2159 e2 = ldl_kernel(ptr + 4);
2160
2161 if (!(e2 & DESC_S_MASK))
2162 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2163 rpl = selector & 3;
2164 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2165 if (seg_reg == R_SS) {
2166 /* must be writable segment */
2167 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2168 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2169 if (rpl != cpl || dpl != cpl)
2170 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2171 } else {
2172 /* must be readable segment */
2173 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2174 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2175
2176 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2177 /* if not conforming code, test rights */
2178 if (dpl < cpl || dpl < rpl)
2179 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2180 }
2181 }
2182
2183 if (!(e2 & DESC_P_MASK)) {
2184 if (seg_reg == R_SS)
2185 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2186 else
2187 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2188 }
2189
2190 /* set the access bit if not already set */
2191 if (!(e2 & DESC_A_MASK)) {
2192 e2 |= DESC_A_MASK;
2193 stl_kernel(ptr + 4, e2);
2194 }
2195
2196 cpu_x86_load_seg_cache(env, seg_reg, selector,
2197 get_seg_base(e1, e2),
2198 get_seg_limit(e1, e2),
2199 e2);
2200#if 0
93fcfe39 2201 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2202 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2203#endif
2204 }
2205}
2206
2207/* protected mode jump */
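/* Direct far jumps check the target code segment (conforming: DPL <= CPL;
   non-conforming: RPL <= CPL and DPL == CPL). Jumps through a task or
   call gate instead redirect to the TSS or to the code segment named in
   the gate, without any privilege change. */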
2208void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2209 int next_eip_addend)
2210{
2211 int gate_cs, type;
2212 uint32_t e1, e2, cpl, dpl, rpl, limit;
2213 target_ulong next_eip;
2214
2215 if ((new_cs & 0xfffc) == 0)
2216 raise_exception_err(EXCP0D_GPF, 0);
2217 if (load_segment(&e1, &e2, new_cs) != 0)
2218 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2219 cpl = env->hflags & HF_CPL_MASK;
2220 if (e2 & DESC_S_MASK) {
2221 if (!(e2 & DESC_CS_MASK))
2222 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2223 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2224 if (e2 & DESC_C_MASK) {
2225 /* conforming code segment */
2226 if (dpl > cpl)
2227 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2228 } else {
2229 /* non-conforming code segment */
2230 rpl = new_cs & 3;
2231 if (rpl > cpl)
2232 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2233 if (dpl != cpl)
2234 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2235 }
2236 if (!(e2 & DESC_P_MASK))
2237 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2238 limit = get_seg_limit(e1, e2);
2239 if (new_eip > limit &&
2240 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2241 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2242 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2243 get_seg_base(e1, e2), limit, e2);
2244 EIP = new_eip;
2245 } else {
2246 /* jump to call or task gate */
2247 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2248 rpl = new_cs & 3;
2249 cpl = env->hflags & HF_CPL_MASK;
2250 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2251 switch(type) {
2252 case 1: /* 286 TSS */
2253 case 9: /* 386 TSS */
2254 case 5: /* task gate */
2255 if (dpl < cpl || dpl < rpl)
2256 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2257 next_eip = env->eip + next_eip_addend;
2258 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2259 CC_OP = CC_OP_EFLAGS;
2260 break;
2261 case 4: /* 286 call gate */
2262 case 12: /* 386 call gate */
2263 if ((dpl < cpl) || (dpl < rpl))
2264 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2265 if (!(e2 & DESC_P_MASK))
2266 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2267 gate_cs = e1 >> 16;
2268 new_eip = (e1 & 0xffff);
2269 if (type == 12)
2270 new_eip |= (e2 & 0xffff0000);
2271 if (load_segment(&e1, &e2, gate_cs) != 0)
2272 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2273 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2274 /* must be code segment */
2275 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2276 (DESC_S_MASK | DESC_CS_MASK)))
2277 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2278 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2279 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2280 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2281 if (!(e2 & DESC_P_MASK))
2282 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2283 limit = get_seg_limit(e1, e2);
2284 if (new_eip > limit)
2285 raise_exception_err(EXCP0D_GPF, 0);
2286 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2287 get_seg_base(e1, e2), limit, e2);
2288 EIP = new_eip;
2289 break;
2290 default:
2291 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2292 break;
2293 }
2294 }
2295}
2296
2297/* real mode call */
2298void helper_lcall_real(int new_cs, target_ulong new_eip1,
2299 int shift, int next_eip)
2300{
2301 int new_eip;
2302 uint32_t esp, esp_mask;
2303 target_ulong ssp;
2304
2305 new_eip = new_eip1;
2306 esp = ESP;
2307 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2308 ssp = env->segs[R_SS].base;
2309 if (shift) {
2310 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2311 PUSHL(ssp, esp, esp_mask, next_eip);
2312 } else {
2313 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2314 PUSHW(ssp, esp, esp_mask, next_eip);
2315 }
2316
2317 SET_ESP(esp, esp_mask);
2318 env->eip = new_eip;
2319 env->segs[R_CS].selector = new_cs;
2320 env->segs[R_CS].base = (new_cs << 4);
2321}
2322
2323/* protected mode call */
2324void helper_lcall_protected(int new_cs, target_ulong new_eip,
2325 int shift, int next_eip_addend)
2326{
2327 int new_stack, i;
2328 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1c918eba 2329 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2330 uint32_t val, limit, old_sp_mask;
2331 target_ulong ssp, old_ssp, next_eip;
2332
2333 next_eip = env->eip + next_eip_addend;
2334 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2335 LOG_PCALL_STATE(env);
2336 if ((new_cs & 0xfffc) == 0)
2337 raise_exception_err(EXCP0D_GPF, 0);
2338 if (load_segment(&e1, &e2, new_cs) != 0)
2339 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2340 cpl = env->hflags & HF_CPL_MASK;
d12d51d5 2341 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2342 if (e2 & DESC_S_MASK) {
2343 if (!(e2 & DESC_CS_MASK))
2344 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2345 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2346 if (e2 & DESC_C_MASK) {
2347 /* conforming code segment */
2348 if (dpl > cpl)
2349 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2350 } else {
2351 /* non-conforming code segment */
2352 rpl = new_cs & 3;
2353 if (rpl > cpl)
2354 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2355 if (dpl != cpl)
2356 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2357 }
2358 if (!(e2 & DESC_P_MASK))
2359 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2360
2361#ifdef TARGET_X86_64
2362 /* XXX: check 16/32 bit cases in long mode */
2363 if (shift == 2) {
2364 target_ulong rsp;
2365 /* 64 bit case */
2366 rsp = ESP;
2367 PUSHQ(rsp, env->segs[R_CS].selector);
2368 PUSHQ(rsp, next_eip);
2369 /* from this point, not restartable */
2370 ESP = rsp;
2371 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2372 get_seg_base(e1, e2),
2373 get_seg_limit(e1, e2), e2);
2374 EIP = new_eip;
2375 } else
2376#endif
2377 {
2378 sp = ESP;
2379 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2380 ssp = env->segs[R_SS].base;
2381 if (shift) {
2382 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2383 PUSHL(ssp, sp, sp_mask, next_eip);
2384 } else {
2385 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2386 PUSHW(ssp, sp, sp_mask, next_eip);
2387 }
2388
2389 limit = get_seg_limit(e1, e2);
2390 if (new_eip > limit)
2391 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2392 /* from this point, not restartable */
2393 SET_ESP(sp, sp_mask);
2394 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2395 get_seg_base(e1, e2), limit, e2);
2396 EIP = new_eip;
2397 }
2398 } else {
2399 /* check gate type */
2400 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2401 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2402 rpl = new_cs & 3;
2403 switch(type) {
2404 case 1: /* available 286 TSS */
2405 case 9: /* available 386 TSS */
2406 case 5: /* task gate */
2407 if (dpl < cpl || dpl < rpl)
2408 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2409 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2410 CC_OP = CC_OP_EFLAGS;
2411 return;
2412 case 4: /* 286 call gate */
2413 case 12: /* 386 call gate */
2414 break;
2415 default:
2416 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2417 break;
2418 }
2419 shift = type >> 3;
2420
2421 if (dpl < cpl || dpl < rpl)
2422 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2423 /* check valid bit */
2424 if (!(e2 & DESC_P_MASK))
2425 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2426 selector = e1 >> 16;
2427 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2428 param_count = e2 & 0x1f;
2429 if ((selector & 0xfffc) == 0)
2430 raise_exception_err(EXCP0D_GPF, 0);
2431
2432 if (load_segment(&e1, &e2, selector) != 0)
2433 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2434 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2435 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2436 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2437 if (dpl > cpl)
2438 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2439 if (!(e2 & DESC_P_MASK))
2440 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2441
2442 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2443 /* to inner privilege */
2444 get_ss_esp_from_tss(&ss, &sp, dpl);
d12d51d5 2445 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
eaa728ee 2446 ss, sp, param_count, ESP);
2447 if ((ss & 0xfffc) == 0)
2448 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2449 if ((ss & 3) != dpl)
2450 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2451 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2452 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2453 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2454 if (ss_dpl != dpl)
2455 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2456 if (!(ss_e2 & DESC_S_MASK) ||
2457 (ss_e2 & DESC_CS_MASK) ||
2458 !(ss_e2 & DESC_W_MASK))
2459 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2460 if (!(ss_e2 & DESC_P_MASK))
2461 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2462
2463 // push_size = ((param_count * 2) + 8) << shift;
2464
2465 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2466 old_ssp = env->segs[R_SS].base;
2467
2468 sp_mask = get_sp_mask(ss_e2);
2469 ssp = get_seg_base(ss_e1, ss_e2);
2470 if (shift) {
2471 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2472 PUSHL(ssp, sp, sp_mask, ESP);
2473 for(i = param_count - 1; i >= 0; i--) {
2474 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2475 PUSHL(ssp, sp, sp_mask, val);
2476 }
2477 } else {
2478 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2479 PUSHW(ssp, sp, sp_mask, ESP);
2480 for(i = param_count - 1; i >= 0; i--) {
2481 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2482 PUSHW(ssp, sp, sp_mask, val);
2483 }
2484 }
2485 new_stack = 1;
2486 } else {
2487 /* to same privilege */
2488 sp = ESP;
2489 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2490 ssp = env->segs[R_SS].base;
2491 // push_size = (4 << shift);
2492 new_stack = 0;
2493 }
2494
2495 if (shift) {
2496 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2497 PUSHL(ssp, sp, sp_mask, next_eip);
2498 } else {
2499 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2500 PUSHW(ssp, sp, sp_mask, next_eip);
2501 }
2502
2503 /* from this point, not restartable */
2504
2505 if (new_stack) {
2506 ss = (ss & ~3) | dpl;
2507 cpu_x86_load_seg_cache(env, R_SS, ss,
2508 ssp,
2509 get_seg_limit(ss_e1, ss_e2),
2510 ss_e2);
2511 }
2512
2513 selector = (selector & ~3) | dpl;
2514 cpu_x86_load_seg_cache(env, R_CS, selector,
2515 get_seg_base(e1, e2),
2516 get_seg_limit(e1, e2),
2517 e2);
2518 cpu_x86_set_cpl(env, dpl);
2519 SET_ESP(sp, sp_mask);
2520 EIP = offset;
2521 }
2522}
2523
2524/* real and vm86 mode iret */
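/* pops EIP, CS and EFLAGS, in that order; in VM86 mode IOPL is excluded
   from the writable flag mask, while real mode may update it. */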
2525void helper_iret_real(int shift)
2526{
2527 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2528 target_ulong ssp;
2529 int eflags_mask;
2530
2531 sp_mask = 0xffff; /* XXX: use SS segment size? */
2532 sp = ESP;
2533 ssp = env->segs[R_SS].base;
2534 if (shift == 1) {
2535 /* 32 bits */
2536 POPL(ssp, sp, sp_mask, new_eip);
2537 POPL(ssp, sp, sp_mask, new_cs);
2538 new_cs &= 0xffff;
2539 POPL(ssp, sp, sp_mask, new_eflags);
2540 } else {
2541 /* 16 bits */
2542 POPW(ssp, sp, sp_mask, new_eip);
2543 POPW(ssp, sp, sp_mask, new_cs);
2544 POPW(ssp, sp, sp_mask, new_eflags);
2545 }
2546 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2547 env->segs[R_CS].selector = new_cs;
2548 env->segs[R_CS].base = (new_cs << 4);
2549 env->eip = new_eip;
2550 if (env->eflags & VM_MASK)
2551 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2552 else
2553 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2554 if (shift == 0)
2555 eflags_mask &= 0xffff;
2556 load_eflags(new_eflags, eflags_mask);
db620f46 2557 env->hflags2 &= ~HF2_NMI_MASK;
2558}
2559
2560static inline void validate_seg(int seg_reg, int cpl)
2561{
2562 int dpl;
2563 uint32_t e2;
2564
2565 /* XXX: on x86_64, we do not want to nullify FS and GS because
2566 they may still contain a valid base. I would be interested to
2567 know how a real x86_64 CPU behaves */
2568 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2569 (env->segs[seg_reg].selector & 0xfffc) == 0)
2570 return;
2571
2572 e2 = env->segs[seg_reg].flags;
2573 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2574 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2575 /* data or non-conforming code segment */
2576 if (dpl < cpl) {
2577 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2578 }
2579 }
2580}
2581
2582/* protected mode iret */
2583static inline void helper_ret_protected(int shift, int is_iret, int addend)
2584{
2585 uint32_t new_cs, new_eflags, new_ss;
2586 uint32_t new_es, new_ds, new_fs, new_gs;
2587 uint32_t e1, e2, ss_e1, ss_e2;
2588 int cpl, dpl, rpl, eflags_mask, iopl;
2589 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2590
2591#ifdef TARGET_X86_64
2592 if (shift == 2)
2593 sp_mask = -1;
2594 else
2595#endif
2596 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2597 sp = ESP;
2598 ssp = env->segs[R_SS].base;
2599 new_eflags = 0; /* avoid warning */
2600#ifdef TARGET_X86_64
2601 if (shift == 2) {
2602 POPQ(sp, new_eip);
2603 POPQ(sp, new_cs);
2604 new_cs &= 0xffff;
2605 if (is_iret) {
2606 POPQ(sp, new_eflags);
2607 }
2608 } else
2609#endif
2610 if (shift == 1) {
2611 /* 32 bits */
2612 POPL(ssp, sp, sp_mask, new_eip);
2613 POPL(ssp, sp, sp_mask, new_cs);
2614 new_cs &= 0xffff;
2615 if (is_iret) {
2616 POPL(ssp, sp, sp_mask, new_eflags);
2617 if (new_eflags & VM_MASK)
2618 goto return_to_vm86;
2619 }
2620 } else {
2621 /* 16 bits */
2622 POPW(ssp, sp, sp_mask, new_eip);
2623 POPW(ssp, sp, sp_mask, new_cs);
2624 if (is_iret)
2625 POPW(ssp, sp, sp_mask, new_eflags);
2626 }
2627 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2628 new_cs, new_eip, shift, addend);
2629 LOG_PCALL_STATE(env);
2630 if ((new_cs & 0xfffc) == 0)
2631 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2632 if (load_segment(&e1, &e2, new_cs) != 0)
2633 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2634 if (!(e2 & DESC_S_MASK) ||
2635 !(e2 & DESC_CS_MASK))
2636 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2637 cpl = env->hflags & HF_CPL_MASK;
2638 rpl = new_cs & 3;
2639 if (rpl < cpl)
2640 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2641 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2642 if (e2 & DESC_C_MASK) {
2643 if (dpl > rpl)
2644 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2645 } else {
2646 if (dpl != rpl)
2647 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2648 }
2649 if (!(e2 & DESC_P_MASK))
2650 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2651
2652 sp += addend;
2653 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2654 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
1235fc06 2655 /* return to same privilege level */
2656 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2657 get_seg_base(e1, e2),
2658 get_seg_limit(e1, e2),
2659 e2);
2660 } else {
2661 /* return to different privilege level */
2662#ifdef TARGET_X86_64
2663 if (shift == 2) {
2664 POPQ(sp, new_esp);
2665 POPQ(sp, new_ss);
2666 new_ss &= 0xffff;
2667 } else
2668#endif
2669 if (shift == 1) {
2670 /* 32 bits */
2671 POPL(ssp, sp, sp_mask, new_esp);
2672 POPL(ssp, sp, sp_mask, new_ss);
2673 new_ss &= 0xffff;
2674 } else {
2675 /* 16 bits */
2676 POPW(ssp, sp, sp_mask, new_esp);
2677 POPW(ssp, sp, sp_mask, new_ss);
2678 }
d12d51d5 2679 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
eaa728ee 2680 new_ss, new_esp);
2681 if ((new_ss & 0xfffc) == 0) {
2682#ifdef TARGET_X86_64
2683 /* NULL ss is allowed in long mode if cpl != 3 */
2684 /* XXX: test CS64 ? */
2685 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2686 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2687 0, 0xffffffff,
2688 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2689 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2690 DESC_W_MASK | DESC_A_MASK);
2691 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2692 } else
2693#endif
2694 {
2695 raise_exception_err(EXCP0D_GPF, 0);
2696 }
2697 } else {
2698 if ((new_ss & 3) != rpl)
2699 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2700 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2701 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2702 if (!(ss_e2 & DESC_S_MASK) ||
2703 (ss_e2 & DESC_CS_MASK) ||
2704 !(ss_e2 & DESC_W_MASK))
2705 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2706 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2707 if (dpl != rpl)
2708 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2709 if (!(ss_e2 & DESC_P_MASK))
2710 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2711 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2712 get_seg_base(ss_e1, ss_e2),
2713 get_seg_limit(ss_e1, ss_e2),
2714 ss_e2);
2715 }
2716
2717 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2718 get_seg_base(e1, e2),
2719 get_seg_limit(e1, e2),
2720 e2);
2721 cpu_x86_set_cpl(env, rpl);
2722 sp = new_esp;
2723#ifdef TARGET_X86_64
2724 if (env->hflags & HF_CS64_MASK)
2725 sp_mask = -1;
2726 else
2727#endif
2728 sp_mask = get_sp_mask(ss_e2);
2729
2730 /* validate data segments */
2731 validate_seg(R_ES, rpl);
2732 validate_seg(R_DS, rpl);
2733 validate_seg(R_FS, rpl);
2734 validate_seg(R_GS, rpl);
2735
2736 sp += addend;
2737 }
2738 SET_ESP(sp, sp_mask);
2739 env->eip = new_eip;
2740 if (is_iret) {
2741 /* NOTE: 'cpl' is the _old_ CPL */
2742 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2743 if (cpl == 0)
2744 eflags_mask |= IOPL_MASK;
2745 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2746 if (cpl <= iopl)
2747 eflags_mask |= IF_MASK;
2748 if (shift == 0)
2749 eflags_mask &= 0xffff;
2750 load_eflags(new_eflags, eflags_mask);
2751 }
2752 return;
2753
2754 return_to_vm86:
2755 POPL(ssp, sp, sp_mask, new_esp);
2756 POPL(ssp, sp, sp_mask, new_ss);
2757 POPL(ssp, sp, sp_mask, new_es);
2758 POPL(ssp, sp, sp_mask, new_ds);
2759 POPL(ssp, sp, sp_mask, new_fs);
2760 POPL(ssp, sp, sp_mask, new_gs);
2761
2762 /* modify processor state */
2763 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2764 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2765 load_seg_vm(R_CS, new_cs & 0xffff);
2766 cpu_x86_set_cpl(env, 3);
2767 load_seg_vm(R_SS, new_ss & 0xffff);
2768 load_seg_vm(R_ES, new_es & 0xffff);
2769 load_seg_vm(R_DS, new_ds & 0xffff);
2770 load_seg_vm(R_FS, new_fs & 0xffff);
2771 load_seg_vm(R_GS, new_gs & 0xffff);
2772
2773 env->eip = new_eip & 0xffff;
2774 ESP = new_esp;
2775}
2776
2777void helper_iret_protected(int shift, int next_eip)
2778{
2779 int tss_selector, type;
2780 uint32_t e1, e2;
2781
2782 /* specific case for TSS */
2783 if (env->eflags & NT_MASK) {
2784#ifdef TARGET_X86_64
2785 if (env->hflags & HF_LMA_MASK)
2786 raise_exception_err(EXCP0D_GPF, 0);
2787#endif
2788 tss_selector = lduw_kernel(env->tr.base + 0);
2789 if (tss_selector & 4)
2790 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2791 if (load_segment(&e1, &e2, tss_selector) != 0)
2792 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2793 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2794 /* NOTE: we check both segment and busy TSS */
2795 if (type != 3)
2796 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2797 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2798 } else {
2799 helper_ret_protected(shift, 1, 0);
2800 }
db620f46 2801 env->hflags2 &= ~HF2_NMI_MASK;
2802}
2803
2804void helper_lret_protected(int shift, int addend)
2805{
2806 helper_ret_protected(shift, 0, addend);
2807}
2808
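/* SYSENTER loads flat ring-0 code and stack segments derived from the
   SYSENTER_CS MSR and jumps to SYSENTER_EIP/SYSENTER_ESP; a zero
   SYSENTER_CS means the MSRs were never set up, hence #GP. */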
2809void helper_sysenter(void)
2810{
2811 if (env->sysenter_cs == 0) {
2812 raise_exception_err(EXCP0D_GPF, 0);
2813 }
2814 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2815 cpu_x86_set_cpl(env, 0);
2816
2817#ifdef TARGET_X86_64
2818 if (env->hflags & HF_LMA_MASK) {
2819 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2820 0, 0xffffffff,
2821 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2822 DESC_S_MASK |
2823 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2824 } else
2825#endif
2826 {
2827 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2828 0, 0xffffffff,
2829 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2830 DESC_S_MASK |
2831 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2832 }
2833 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2834 0, 0xffffffff,
2835 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2836 DESC_S_MASK |
2837 DESC_W_MASK | DESC_A_MASK);
2838 ESP = env->sysenter_esp;
2839 EIP = env->sysenter_eip;
2840}
2841
2436b61a 2842void helper_sysexit(int dflag)
2843{
2844 int cpl;
2845
2846 cpl = env->hflags & HF_CPL_MASK;
2847 if (env->sysenter_cs == 0 || cpl != 0) {
2848 raise_exception_err(EXCP0D_GPF, 0);
2849 }
2850 cpu_x86_set_cpl(env, 3);
2851#ifdef TARGET_X86_64
2852 if (dflag == 2) {
2853 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2854 0, 0xffffffff,
2855 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2856 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2857 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2858 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2859 0, 0xffffffff,
2860 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2861 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2862 DESC_W_MASK | DESC_A_MASK);
2863 } else
2864#endif
2865 {
2866 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2867 0, 0xffffffff,
2868 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2869 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2870 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2871 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2872 0, 0xffffffff,
2873 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2874 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2875 DESC_W_MASK | DESC_A_MASK);
2876 }
2877 ESP = ECX;
2878 EIP = EDX;
2879}
2880
2881#if defined(CONFIG_USER_ONLY)
2882target_ulong helper_read_crN(int reg)
eaa728ee 2883{
2884 return 0;
2885}
2886
2887void helper_write_crN(int reg, target_ulong t0)
2888{
2889}
2890
2891void helper_movl_drN_T0(int reg, target_ulong t0)
2892{
2893}
2894#else
2895target_ulong helper_read_crN(int reg)
2896{
2897 target_ulong val;
2898
2899 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2900 switch(reg) {
2901 default:
2902 val = env->cr[reg];
2903 break;
2904 case 8:
db620f46 2905 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 2906 val = cpu_get_apic_tpr(env->apic_state);
2907 } else {
2908 val = env->v_tpr;
2909 }
2910 break;
2911 }
2912 return val;
2913}
2914
2915void helper_write_crN(int reg, target_ulong t0)
2916{
2917 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2918 switch(reg) {
2919 case 0:
2920 cpu_x86_update_cr0(env, t0);
2921 break;
2922 case 3:
2923 cpu_x86_update_cr3(env, t0);
2924 break;
2925 case 4:
2926 cpu_x86_update_cr4(env, t0);
2927 break;
2928 case 8:
db620f46 2929 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 2930 cpu_set_apic_tpr(env->apic_state, t0);
2931 }
2932 env->v_tpr = t0 & 0x0f;
2933 break;
2934 default:
2935 env->cr[reg] = t0;
2936 break;
2937 }
eaa728ee 2938}
2939
2940void helper_movl_drN_T0(int reg, target_ulong t0)
2941{
2942 int i;
2943
2944 if (reg < 4) {
2945 hw_breakpoint_remove(env, reg);
2946 env->dr[reg] = t0;
2947 hw_breakpoint_insert(env, reg);
2948 } else if (reg == 7) {
2949 for (i = 0; i < 4; i++)
2950 hw_breakpoint_remove(env, i);
2951 env->dr[7] = t0;
2952 for (i = 0; i < 4; i++)
2953 hw_breakpoint_insert(env, i);
2954 } else
2955 env->dr[reg] = t0;
2956}
872929aa 2957#endif
2958
2959void helper_lmsw(target_ulong t0)
2960{
2961 /* only the 4 lower bits of CR0 are modified. PE cannot be cleared
2962 once it has been set to one. */
2963 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
872929aa 2964 helper_write_crN(0, t0);
2965}
2966
2967void helper_clts(void)
2968{
2969 env->cr[0] &= ~CR0_TS_MASK;
2970 env->hflags &= ~HF_TS_MASK;
2971}
2972
2973void helper_invlpg(target_ulong addr)
2974{
872929aa 2975 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
914178d3 2976 tlb_flush_page(env, addr);
2977}
2978
2979void helper_rdtsc(void)
2980{
2981 uint64_t val;
2982
2983 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2984 raise_exception(EXCP0D_GPF);
2985 }
2986 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2987
33c263df 2988 val = cpu_get_tsc(env) + env->tsc_offset;
2989 EAX = (uint32_t)(val);
2990 EDX = (uint32_t)(val >> 32);
2991}
2992
2993void helper_rdtscp(void)
2994{
2995 helper_rdtsc();
2996 ECX = (uint32_t)(env->tsc_aux);
2997}
2998
2999void helper_rdpmc(void)
3000{
3001 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3002 raise_exception(EXCP0D_GPF);
3003 }
3004 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3005
3006 /* currently unimplemented */
3007 raise_exception_err(EXCP06_ILLOP, 0);
3008}
3009
3010#if defined(CONFIG_USER_ONLY)
3011void helper_wrmsr(void)
3012{
3013}
3014
3015void helper_rdmsr(void)
3016{
3017}
3018#else
3019void helper_wrmsr(void)
3020{
3021 uint64_t val;
3022
3023 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3024
3025 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3026
3027 switch((uint32_t)ECX) {
3028 case MSR_IA32_SYSENTER_CS:
3029 env->sysenter_cs = val & 0xffff;
3030 break;
3031 case MSR_IA32_SYSENTER_ESP:
3032 env->sysenter_esp = val;
3033 break;
3034 case MSR_IA32_SYSENTER_EIP:
3035 env->sysenter_eip = val;
3036 break;
3037 case MSR_IA32_APICBASE:
4a942cea 3038 cpu_set_apic_base(env->apic_state, val);
3039 break;
3040 case MSR_EFER:
3041 {
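/* only EFER bits whose features are advertised by CPUID may be
   modified; all other bits are silently preserved */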
3042 uint64_t update_mask;
3043 update_mask = 0;
3044 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3045 update_mask |= MSR_EFER_SCE;
3046 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3047 update_mask |= MSR_EFER_LME;
3048 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3049 update_mask |= MSR_EFER_FFXSR;
3050 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3051 update_mask |= MSR_EFER_NXE;
3052 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3053 update_mask |= MSR_EFER_SVME;
3054 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3055 update_mask |= MSR_EFER_FFXSR;
3056 cpu_load_efer(env, (env->efer & ~update_mask) |
3057 (val & update_mask));
3058 }
3059 break;
3060 case MSR_STAR:
3061 env->star = val;
3062 break;
3063 case MSR_PAT:
3064 env->pat = val;
3065 break;
3066 case MSR_VM_HSAVE_PA:
3067 env->vm_hsave = val;
3068 break;
3069#ifdef TARGET_X86_64
3070 case MSR_LSTAR:
3071 env->lstar = val;
3072 break;
3073 case MSR_CSTAR:
3074 env->cstar = val;
3075 break;
3076 case MSR_FMASK:
3077 env->fmask = val;
3078 break;
3079 case MSR_FSBASE:
3080 env->segs[R_FS].base = val;
3081 break;
3082 case MSR_GSBASE:
3083 env->segs[R_GS].base = val;
3084 break;
3085 case MSR_KERNELGSBASE:
3086 env->kernelgsbase = val;
3087 break;
3088#endif
3089 case MSR_MTRRphysBase(0):
3090 case MSR_MTRRphysBase(1):
3091 case MSR_MTRRphysBase(2):
3092 case MSR_MTRRphysBase(3):
3093 case MSR_MTRRphysBase(4):
3094 case MSR_MTRRphysBase(5):
3095 case MSR_MTRRphysBase(6):
3096 case MSR_MTRRphysBase(7):
3097 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3098 break;
3099 case MSR_MTRRphysMask(0):
3100 case MSR_MTRRphysMask(1):
3101 case MSR_MTRRphysMask(2):
3102 case MSR_MTRRphysMask(3):
3103 case MSR_MTRRphysMask(4):
3104 case MSR_MTRRphysMask(5):
3105 case MSR_MTRRphysMask(6):
3106 case MSR_MTRRphysMask(7):
3107 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3108 break;
3109 case MSR_MTRRfix64K_00000:
3110 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3111 break;
3112 case MSR_MTRRfix16K_80000:
3113 case MSR_MTRRfix16K_A0000:
3114 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3115 break;
3116 case MSR_MTRRfix4K_C0000:
3117 case MSR_MTRRfix4K_C8000:
3118 case MSR_MTRRfix4K_D0000:
3119 case MSR_MTRRfix4K_D8000:
3120 case MSR_MTRRfix4K_E0000:
3121 case MSR_MTRRfix4K_E8000:
3122 case MSR_MTRRfix4K_F0000:
3123 case MSR_MTRRfix4K_F8000:
3124 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3125 break;
3126 case MSR_MTRRdefType:
3127 env->mtrr_deftype = val;
3128 break;
3129 case MSR_MCG_STATUS:
3130 env->mcg_status = val;
3131 break;
3132 case MSR_MCG_CTL:
3133 if ((env->mcg_cap & MCG_CTL_P)
3134 && (val == 0 || val == ~(uint64_t)0))
3135 env->mcg_ctl = val;
3136 break;
3137 case MSR_TSC_AUX:
3138 env->tsc_aux = val;
3139 break;
eaa728ee 3140 default:
3141 if ((uint32_t)ECX >= MSR_MC0_CTL
3142 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3143 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3144 if ((offset & 0x3) != 0
3145 || (val == 0 || val == ~(uint64_t)0))
3146 env->mce_banks[offset] = val;
3147 break;
3148 }
3149 /* XXX: exception ? */
3150 break;
3151 }
3152}
3153
3154void helper_rdmsr(void)
3155{
3156 uint64_t val;
3157
3158 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3159
3160 switch((uint32_t)ECX) {
3161 case MSR_IA32_SYSENTER_CS:
3162 val = env->sysenter_cs;
3163 break;
3164 case MSR_IA32_SYSENTER_ESP:
3165 val = env->sysenter_esp;
3166 break;
3167 case MSR_IA32_SYSENTER_EIP:
3168 val = env->sysenter_eip;
3169 break;
3170 case MSR_IA32_APICBASE:
4a942cea 3171 val = cpu_get_apic_base(env->apic_state);
eaa728ee
FB
3172 break;
3173 case MSR_EFER:
3174 val = env->efer;
3175 break;
3176 case MSR_STAR:
3177 val = env->star;
3178 break;
3179 case MSR_PAT:
3180 val = env->pat;
3181 break;
3182 case MSR_VM_HSAVE_PA:
3183 val = env->vm_hsave;
3184 break;
3185 case MSR_IA32_PERF_STATUS:
3186 /* tsc_increment_by_tick */
3187 val = 1000ULL;
3188 /* CPU multiplier */
3189 val |= (((uint64_t)4ULL) << 40);
3190 break;
3191#ifdef TARGET_X86_64
3192 case MSR_LSTAR:
3193 val = env->lstar;
3194 break;
3195 case MSR_CSTAR:
3196 val = env->cstar;
3197 break;
3198 case MSR_FMASK:
3199 val = env->fmask;
3200 break;
3201 case MSR_FSBASE:
3202 val = env->segs[R_FS].base;
3203 break;
3204 case MSR_GSBASE:
3205 val = env->segs[R_GS].base;
3206 break;
3207 case MSR_KERNELGSBASE:
3208 val = env->kernelgsbase;
3209 break;
3210 case MSR_TSC_AUX:
3211 val = env->tsc_aux;
3212 break;
eaa728ee 3213#endif
3214 case MSR_MTRRphysBase(0):
3215 case MSR_MTRRphysBase(1):
3216 case MSR_MTRRphysBase(2):
3217 case MSR_MTRRphysBase(3):
3218 case MSR_MTRRphysBase(4):
3219 case MSR_MTRRphysBase(5):
3220 case MSR_MTRRphysBase(6):
3221 case MSR_MTRRphysBase(7):
3222 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3223 break;
3224 case MSR_MTRRphysMask(0):
3225 case MSR_MTRRphysMask(1):
3226 case MSR_MTRRphysMask(2):
3227 case MSR_MTRRphysMask(3):
3228 case MSR_MTRRphysMask(4):
3229 case MSR_MTRRphysMask(5):
3230 case MSR_MTRRphysMask(6):
3231 case MSR_MTRRphysMask(7):
3232 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3233 break;
3234 case MSR_MTRRfix64K_00000:
3235 val = env->mtrr_fixed[0];
3236 break;
3237 case MSR_MTRRfix16K_80000:
3238 case MSR_MTRRfix16K_A0000:
3239 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3240 break;
3241 case MSR_MTRRfix4K_C0000:
3242 case MSR_MTRRfix4K_C8000:
3243 case MSR_MTRRfix4K_D0000:
3244 case MSR_MTRRfix4K_D8000:
3245 case MSR_MTRRfix4K_E0000:
3246 case MSR_MTRRfix4K_E8000:
3247 case MSR_MTRRfix4K_F0000:
3248 case MSR_MTRRfix4K_F8000:
3249 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3250 break;
3251 case MSR_MTRRdefType:
3252 val = env->mtrr_deftype;
3253 break;
3254 case MSR_MTRRcap:
3255 if (env->cpuid_features & CPUID_MTRR)
3256 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3257 else
3258 /* XXX: exception ? */
3259 val = 0;
3260 break;
3261 case MSR_MCG_CAP:
3262 val = env->mcg_cap;
3263 break;
3264 case MSR_MCG_CTL:
3265 if (env->mcg_cap & MCG_CTL_P)
3266 val = env->mcg_ctl;
3267 else
3268 val = 0;
3269 break;
3270 case MSR_MCG_STATUS:
3271 val = env->mcg_status;
3272 break;
eaa728ee 3273 default:
3274 if ((uint32_t)ECX >= MSR_MC0_CTL
3275 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3276 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3277 val = env->mce_banks[offset];
3278 break;
3279 }
3280 /* XXX: exception ? */
3281 val = 0;
3282 break;
3283 }
3284 EAX = (uint32_t)(val);
3285 EDX = (uint32_t)(val >> 32);
3286}
3287#endif
3288
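/* LSL/LAR/VERR/VERW do not fault on a bad selector: they report success
   or failure through ZF, which is why these helpers patch CC_SRC
   directly instead of raising an exception. */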
3289target_ulong helper_lsl(target_ulong selector1)
3290{
3291 unsigned int limit;
3292 uint32_t e1, e2, eflags, selector;
3293 int rpl, dpl, cpl, type;
3294
3295 selector = selector1 & 0xffff;
a7812ae4 3296 eflags = helper_cc_compute_all(CC_OP);
3297 if ((selector & 0xfffc) == 0)
3298 goto fail;
3299 if (load_segment(&e1, &e2, selector) != 0)
3300 goto fail;
3301 rpl = selector & 3;
3302 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3303 cpl = env->hflags & HF_CPL_MASK;
3304 if (e2 & DESC_S_MASK) {
3305 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3306 /* conforming */
3307 } else {
3308 if (dpl < cpl || dpl < rpl)
3309 goto fail;
3310 }
3311 } else {
3312 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3313 switch(type) {
3314 case 1:
3315 case 2:
3316 case 3:
3317 case 9:
3318 case 11:
3319 break;
3320 default:
3321 goto fail;
3322 }
3323 if (dpl < cpl || dpl < rpl) {
3324 fail:
3325 CC_SRC = eflags & ~CC_Z;
3326 return 0;
3327 }
3328 }
3329 limit = get_seg_limit(e1, e2);
3330 CC_SRC = eflags | CC_Z;
3331 return limit;
3332}
3333
3334target_ulong helper_lar(target_ulong selector1)
3335{
3336 uint32_t e1, e2, eflags, selector;
3337 int rpl, dpl, cpl, type;
3338
3339 selector = selector1 & 0xffff;
a7812ae4 3340 eflags = helper_cc_compute_all(CC_OP);
3341 if ((selector & 0xfffc) == 0)
3342 goto fail;
3343 if (load_segment(&e1, &e2, selector) != 0)
3344 goto fail;
3345 rpl = selector & 3;
3346 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3347 cpl = env->hflags & HF_CPL_MASK;
3348 if (e2 & DESC_S_MASK) {
3349 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3350 /* conforming */
3351 } else {
3352 if (dpl < cpl || dpl < rpl)
3353 goto fail;
3354 }
3355 } else {
3356 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3357 switch(type) {
3358 case 1:
3359 case 2:
3360 case 3:
3361 case 4:
3362 case 5:
3363 case 9:
3364 case 11:
3365 case 12:
3366 break;
3367 default:
3368 goto fail;
3369 }
3370 if (dpl < cpl || dpl < rpl) {
3371 fail:
3372 CC_SRC = eflags & ~CC_Z;
3373 return 0;
3374 }
3375 }
3376 CC_SRC = eflags | CC_Z;
3377 return e2 & 0x00f0ff00;
3378}
3379
3380void helper_verr(target_ulong selector1)
3381{
3382 uint32_t e1, e2, eflags, selector;
3383 int rpl, dpl, cpl;
3384
3385 selector = selector1 & 0xffff;
a7812ae4 3386 eflags = helper_cc_compute_all(CC_OP);
3387 if ((selector & 0xfffc) == 0)
3388 goto fail;
3389 if (load_segment(&e1, &e2, selector) != 0)
3390 goto fail;
3391 if (!(e2 & DESC_S_MASK))
3392 goto fail;
3393 rpl = selector & 3;
3394 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3395 cpl = env->hflags & HF_CPL_MASK;
3396 if (e2 & DESC_CS_MASK) {
3397 if (!(e2 & DESC_R_MASK))
3398 goto fail;
3399 if (!(e2 & DESC_C_MASK)) {
3400 if (dpl < cpl || dpl < rpl)
3401 goto fail;
3402 }
3403 } else {
3404 if (dpl < cpl || dpl < rpl) {
3405 fail:
3406 CC_SRC = eflags & ~CC_Z;
3407 return;
3408 }
3409 }
3410 CC_SRC = eflags | CC_Z;
3411}
3412
3413void helper_verw(target_ulong selector1)
3414{
3415 uint32_t e1, e2, eflags, selector;
3416 int rpl, dpl, cpl;
3417
3418 selector = selector1 & 0xffff;
a7812ae4 3419 eflags = helper_cc_compute_all(CC_OP);
3420 if ((selector & 0xfffc) == 0)
3421 goto fail;
3422 if (load_segment(&e1, &e2, selector) != 0)
3423 goto fail;
3424 if (!(e2 & DESC_S_MASK))
3425 goto fail;
3426 rpl = selector & 3;
3427 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3428 cpl = env->hflags & HF_CPL_MASK;
3429 if (e2 & DESC_CS_MASK) {
3430 goto fail;
3431 } else {
3432 if (dpl < cpl || dpl < rpl)
3433 goto fail;
3434 if (!(e2 & DESC_W_MASK)) {
3435 fail:
3436 CC_SRC = eflags & ~CC_Z;
3437 return;
3438 }
3439 }
3440 CC_SRC = eflags | CC_Z;
3441}
3442
3443/* x87 FPU helpers */
3444
3445static inline double CPU86_LDouble_to_double(CPU86_LDouble a)
3446{
3447 union {
3448 float64 f64;
3449 double d;
3450 } u;
3451
3452 u.f64 = floatx_to_float64(a, &env->fp_status);
3453 return u.d;
3454}
3455
3456static inline CPU86_LDouble double_to_CPU86_LDouble(double a)
3457{
3458 union {
3459 float64 f64;
3460 double d;
3461 } u;
3462
3463 u.d = a;
3464 return float64_to_floatx(u.f64, &env->fp_status);
3465}
3466
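/* sets the given x87 status-word exception bits; if any raised exception
   is unmasked in the control word, the summary and busy bits are set so
   it can be delivered later (see fpu_raise_exception) */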
3467static void fpu_set_exception(int mask)
3468{
3469 env->fpus |= mask;
3470 if (env->fpus & (~env->fpuc & FPUC_EM))
3471 env->fpus |= FPUS_SE | FPUS_B;
3472}
3473
3474static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3475{
13822781 3476 if (floatx_is_zero(b)) {
eaa728ee 3477 fpu_set_exception(FPUS_ZE);
3478 }
3479 return floatx_div(a, b, &env->fp_status);
3480}
3481
d9957a8b 3482static void fpu_raise_exception(void)
3483{
3484 if (env->cr[0] & CR0_NE_MASK) {
3485 raise_exception(EXCP10_COPR);
3486 }
3487#if !defined(CONFIG_USER_ONLY)
3488 else {
3489 cpu_set_ferr(env);
3490 }
3491#endif
3492}
3493
3494void helper_flds_FT0(uint32_t val)
3495{
3496 union {
3497 float32 f;
3498 uint32_t i;
3499 } u;
3500 u.i = val;
3501 FT0 = float32_to_floatx(u.f, &env->fp_status);
3502}
3503
3504void helper_fldl_FT0(uint64_t val)
3505{
3506 union {
3507 float64 f;
3508 uint64_t i;
3509 } u;
3510 u.i = val;
3511 FT0 = float64_to_floatx(u.f, &env->fp_status);
3512}
3513
3514void helper_fildl_FT0(int32_t val)
3515{
3516 FT0 = int32_to_floatx(val, &env->fp_status);
3517}
3518
3519void helper_flds_ST0(uint32_t val)
3520{
3521 int new_fpstt;
3522 union {
3523 float32 f;
3524 uint32_t i;
3525 } u;
3526 new_fpstt = (env->fpstt - 1) & 7;
3527 u.i = val;
3528 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3529 env->fpstt = new_fpstt;
3530 env->fptags[new_fpstt] = 0; /* validate stack entry */
3531}
3532
3533void helper_fldl_ST0(uint64_t val)
3534{
3535 int new_fpstt;
3536 union {
3537 float64 f;
3538 uint64_t i;
3539 } u;
3540 new_fpstt = (env->fpstt - 1) & 7;
3541 u.i = val;
3542 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3543 env->fpstt = new_fpstt;
3544 env->fptags[new_fpstt] = 0; /* validate stack entry */
3545}
3546
3547void helper_fildl_ST0(int32_t val)
3548{
3549 int new_fpstt;
3550 new_fpstt = (env->fpstt - 1) & 7;
3551 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3552 env->fpstt = new_fpstt;
3553 env->fptags[new_fpstt] = 0; /* validate stack entry */
3554}
3555
3556void helper_fildll_ST0(int64_t val)
3557{
3558 int new_fpstt;
3559 new_fpstt = (env->fpstt - 1) & 7;
3560 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3561 env->fpstt = new_fpstt;
3562 env->fptags[new_fpstt] = 0; /* validate stack entry */
3563}
3564
3565uint32_t helper_fsts_ST0(void)
3566{
3567 union {
3568 float32 f;
3569 uint32_t i;
3570 } u;
3571 u.f = floatx_to_float32(ST0, &env->fp_status);
3572 return u.i;
3573}
3574
3575uint64_t helper_fstl_ST0(void)
3576{
3577 union {
3578 float64 f;
3579 uint64_t i;
3580 } u;
3581 u.f = floatx_to_float64(ST0, &env->fp_status);
3582 return u.i;
3583}
3584
3585int32_t helper_fist_ST0(void)
3586{
3587 int32_t val;
3588 val = floatx_to_int32(ST0, &env->fp_status);
3589 if (val != (int16_t)val)
3590 val = -32768;
3591 return val;
3592}
3593
3594int32_t helper_fistl_ST0(void)
3595{
3596 int32_t val;
3597 val = floatx_to_int32(ST0, &env->fp_status);
3598 return val;
3599}
3600
3601int64_t helper_fistll_ST0(void)
3602{
3603 int64_t val;
3604 val = floatx_to_int64(ST0, &env->fp_status);
3605 return val;
3606}
3607
3608int32_t helper_fistt_ST0(void)
3609{
3610 int32_t val;
3611 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3612 if (val != (int16_t)val)
3613 val = -32768;
3614 return val;
3615}
3616
3617int32_t helper_fisttl_ST0(void)
3618{
3619 int32_t val;
3620 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3621 return val;
3622}
3623
3624int64_t helper_fisttll_ST0(void)
3625{
3626 int64_t val;
3627 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3628 return val;
3629}
3630
3631void helper_fldt_ST0(target_ulong ptr)
3632{
3633 int new_fpstt;
3634 new_fpstt = (env->fpstt - 1) & 7;
3635 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3636 env->fpstt = new_fpstt;
3637 env->fptags[new_fpstt] = 0; /* validate stack entry */
3638}
3639
3640void helper_fstt_ST0(target_ulong ptr)
3641{
3642 helper_fstt(ST0, ptr);
3643}
3644
3645void helper_fpush(void)
3646{
3647 fpush();
3648}
3649
3650void helper_fpop(void)
3651{
3652 fpop();
3653}
3654
3655void helper_fdecstp(void)
3656{
3657 env->fpstt = (env->fpstt - 1) & 7;
3658 env->fpus &= (~0x4700);
3659}
3660
3661void helper_fincstp(void)
3662{
3663 env->fpstt = (env->fpstt + 1) & 7;
3664 env->fpus &= (~0x4700);
3665}
3666
3667/* FPU move */
3668
3669void helper_ffree_STN(int st_index)
3670{
3671 env->fptags[(env->fpstt + st_index) & 7] = 1;
3672}
3673
3674void helper_fmov_ST0_FT0(void)
3675{
3676 ST0 = FT0;
3677}
3678
3679void helper_fmov_FT0_STN(int st_index)
3680{
3681 FT0 = ST(st_index);
3682}
3683
3684void helper_fmov_ST0_STN(int st_index)
3685{
3686 ST0 = ST(st_index);
3687}
3688
3689void helper_fmov_STN_ST0(int st_index)
3690{
3691 ST(st_index) = ST0;
3692}
3693
3694void helper_fxchg_ST0_STN(int st_index)
3695{
3696 CPU86_LDouble tmp;
3697 tmp = ST(st_index);
3698 ST(st_index) = ST0;
3699 ST0 = tmp;
3700}
3701
3702/* FPU operations */
3703
3704static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3705
3706void helper_fcom_ST0_FT0(void)
3707{
3708 int ret;
3709
3710 ret = floatx_compare(ST0, FT0, &env->fp_status);
3711 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3712}
3713
3714void helper_fucom_ST0_FT0(void)
3715{
3716 int ret;
3717
3718 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3719 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3720}
3721
3722static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3723
3724void helper_fcomi_ST0_FT0(void)
3725{
3726 int eflags;
3727 int ret;
3728
3729 ret = floatx_compare(ST0, FT0, &env->fp_status);
a7812ae4 3730 eflags = helper_cc_compute_all(CC_OP);
3731 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3732 CC_SRC = eflags;
3733}
3734
3735void helper_fucomi_ST0_FT0(void)
3736{
3737 int eflags;
3738 int ret;
3739
3740 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
a7812ae4 3741 eflags = helper_cc_compute_all(CC_OP);
3742 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3743 CC_SRC = eflags;
3744}
3745
3746void helper_fadd_ST0_FT0(void)
3747{
67dd64bf 3748 ST0 = floatx_add(ST0, FT0, &env->fp_status);
3749}
3750
3751void helper_fmul_ST0_FT0(void)
3752{
67dd64bf 3753 ST0 = floatx_mul(ST0, FT0, &env->fp_status);
3754}
3755
3756void helper_fsub_ST0_FT0(void)
3757{
67dd64bf 3758 ST0 = floatx_sub(ST0, FT0, &env->fp_status);
3759}
3760
3761void helper_fsubr_ST0_FT0(void)
3762{
67dd64bf 3763 ST0 = floatx_sub(FT0, ST0, &env->fp_status);
3764}
3765
3766void helper_fdiv_ST0_FT0(void)
3767{
3768 ST0 = helper_fdiv(ST0, FT0);
3769}
3770
3771void helper_fdivr_ST0_FT0(void)
3772{
3773 ST0 = helper_fdiv(FT0, ST0);
3774}
3775
3776/* fp operations between STN and ST0 */
3777
3778void helper_fadd_STN_ST0(int st_index)
3779{
67dd64bf 3780 ST(st_index) = floatx_add(ST(st_index), ST0, &env->fp_status);
3781}
3782
3783void helper_fmul_STN_ST0(int st_index)
3784{
67dd64bf 3785 ST(st_index) = floatx_mul(ST(st_index), ST0, &env->fp_status);
3786}
3787
3788void helper_fsub_STN_ST0(int st_index)
3789{
67dd64bf 3790 ST(st_index) = floatx_sub(ST(st_index), ST0, &env->fp_status);
3791}
3792
3793void helper_fsubr_STN_ST0(int st_index)
3794{
67dd64bf 3795 ST(st_index) = floatx_sub(ST0, ST(st_index), &env->fp_status);
3796}
3797
3798void helper_fdiv_STN_ST0(int st_index)
3799{
3800 CPU86_LDouble *p;
3801 p = &ST(st_index);
3802 *p = helper_fdiv(*p, ST0);
3803}
3804
3805void helper_fdivr_STN_ST0(int st_index)
3806{
3807 CPU86_LDouble *p;
3808 p = &ST(st_index);
3809 *p = helper_fdiv(ST0, *p);
3810}
3811
3812/* misc FPU operations */
3813void helper_fchs_ST0(void)
3814{
3815 ST0 = floatx_chs(ST0);
3816}
3817
3818void helper_fabs_ST0(void)
3819{
3820 ST0 = floatx_abs(ST0);
3821}
3822
3823void helper_fld1_ST0(void)
3824{
3825 ST0 = f15rk[1];
3826}
3827
3828void helper_fldl2t_ST0(void)
3829{
3830 ST0 = f15rk[6];
3831}
3832
3833void helper_fldl2e_ST0(void)
3834{
3835 ST0 = f15rk[5];
3836}
3837
3838void helper_fldpi_ST0(void)
3839{
3840 ST0 = f15rk[2];
3841}
3842
3843void helper_fldlg2_ST0(void)
3844{
3845 ST0 = f15rk[3];
3846}
3847
3848void helper_fldln2_ST0(void)
3849{
3850 ST0 = f15rk[4];
3851}
3852
3853void helper_fldz_ST0(void)
3854{
3855 ST0 = f15rk[0];
3856}
3857
3858void helper_fldz_FT0(void)
3859{
3860 FT0 = f15rk[0];
3861}
3862
3863uint32_t helper_fnstsw(void)
3864{
3865 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3866}
3867
3868uint32_t helper_fnstcw(void)
3869{
3870 return env->fpuc;
3871}
3872
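/* propagate the x87 control word into the softfloat status: the RC
   field selects the rounding mode and, with FLOATX80, the PC field
   selects 32/64/80-bit rounding precision */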
3873static void update_fp_status(void)
3874{
3875 int rnd_type;
3876
3877 /* set rounding mode */
3878 switch(env->fpuc & RC_MASK) {
3879 default:
3880 case RC_NEAR:
3881 rnd_type = float_round_nearest_even;
3882 break;
3883 case RC_DOWN:
3884 rnd_type = float_round_down;
3885 break;
3886 case RC_UP:
3887 rnd_type = float_round_up;
3888 break;
3889 case RC_CHOP:
3890 rnd_type = float_round_to_zero;
3891 break;
3892 }
3893 set_float_rounding_mode(rnd_type, &env->fp_status);
3894#ifdef FLOATX80
3895 switch((env->fpuc >> 8) & 3) {
3896 case 0:
3897 rnd_type = 32;
3898 break;
3899 case 2:
3900 rnd_type = 64;
3901 break;
3902 case 3:
3903 default:
3904 rnd_type = 80;
3905 break;
3906 }
3907 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3908#endif
3909}
3910
3911void helper_fldcw(uint32_t val)
3912{
3913 env->fpuc = val;
3914 update_fp_status();
3915}
3916
3917void helper_fclex(void)
3918{
3919 env->fpus &= 0x7f00;
3920}
3921
3922void helper_fwait(void)
3923{
3924 if (env->fpus & FPUS_SE)
3925 fpu_raise_exception();
3926}
3927
3928void helper_fninit(void)
3929{
3930 env->fpus = 0;
3931 env->fpstt = 0;
3932 env->fpuc = 0x37f;
3933 env->fptags[0] = 1;
3934 env->fptags[1] = 1;
3935 env->fptags[2] = 1;
3936 env->fptags[3] = 1;
3937 env->fptags[4] = 1;
3938 env->fptags[5] = 1;
3939 env->fptags[6] = 1;
3940 env->fptags[7] = 1;
3941}
3942
3943/* BCD ops */
3944
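/* FBLD/FBSTP operate on packed BCD: nine bytes of two decimal digits
   each, least-significant byte first, plus a tenth byte whose top bit
   carries the sign. */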
3945void helper_fbld_ST0(target_ulong ptr)
3946{
3947 CPU86_LDouble tmp;
3948 uint64_t val;
3949 unsigned int v;
3950 int i;
3951
3952 val = 0;
3953 for(i = 8; i >= 0; i--) {
3954 v = ldub(ptr + i);
3955 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3956 }
3957 tmp = int64_to_floatx(val, &env->fp_status);
3958 if (ldub(ptr + 9) & 0x80) {
3959 tmp = floatx_chs(tmp); /* floatx_chs() returns the negated value */
3960 }
3961 fpush();
3962 ST0 = tmp;
3963}
3964
3965void helper_fbst_ST0(target_ulong ptr)
3966{
3967 int v;
3968 target_ulong mem_ref, mem_end;
3969 int64_t val;
3970
3971 val = floatx_to_int64(ST0, &env->fp_status);
3972 mem_ref = ptr;
3973 mem_end = mem_ref + 9;
3974 if (val < 0) {
3975 stb(mem_end, 0x80);
3976 val = -val;
3977 } else {
3978 stb(mem_end, 0x00);
3979 }
3980 while (mem_ref < mem_end) {
3981 if (val == 0)
3982 break;
3983 v = val % 100;
3984 val = val / 100;
3985 v = ((v / 10) << 4) | (v % 10);
3986 stb(mem_ref++, v);
3987 }
3988 while (mem_ref < mem_end) {
3989 stb(mem_ref++, 0);
3990 }
3991}
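
/* Illustrative sketch: the two helpers above convert between a binary
   integer and the 10-byte x87 packed-BCD format -- 18 decimal digits,
   two per byte, least significant byte first, sign in bit 7 of byte 9.
   The pure digit decoding, without the guest memory accesses: */
static uint64_t bcd9_to_u64(const uint8_t *p)
{
    uint64_t val = 0;
    int i;

    for (i = 8; i >= 0; i--) {
        val = val * 100 + (p[i] >> 4) * 10 + (p[i] & 0xf);
    }
    return val;                 /* e.g. {0x45, 0x23, 0x01, 0, ...} -> 12345 */
}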
3992
3993void helper_f2xm1(void)
3994{
a2c9ed3c
AJ
3995 double val = CPU86_LDouble_to_double(ST0);
3996 val = pow(2.0, val) - 1.0;
3997 ST0 = double_to_CPU86_LDouble(val);
eaa728ee
FB
3998}
3999
4000void helper_fyl2x(void)
4001{
a2c9ed3c 4002 double fptemp = CPU86_LDouble_to_double(ST0);
eaa728ee 4003
eaa728ee 4004 if (fptemp>0.0){
a2c9ed3c
AJ
4005 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4006 fptemp *= CPU86_LDouble_to_double(ST1);
4007 ST1 = double_to_CPU86_LDouble(fptemp);
eaa728ee
FB
4008 fpop();
4009 } else {
4010 env->fpus &= (~0x4700);
4011 env->fpus |= 0x400;
4012 }
4013}
4014
4015void helper_fptan(void)
4016{
a2c9ed3c 4017 double fptemp = CPU86_LDouble_to_double(ST0);
eaa728ee 4018
eaa728ee
FB
4019 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4020 env->fpus |= 0x400;
4021 } else {
a2c9ed3c
AJ
4022 fptemp = tan(fptemp);
4023 ST0 = double_to_CPU86_LDouble(fptemp);
eaa728ee 4024 fpush();
a2c9ed3c 4025 ST0 = floatx_one;
eaa728ee
FB
4026 env->fpus &= (~0x400); /* C2 <-- 0 */
4027 /* the above code is for |arg| < 2**52 only */
4028 }
4029}
4030
4031void helper_fpatan(void)
4032{
a2c9ed3c 4033 double fptemp, fpsrcop;
eaa728ee 4034
a2c9ed3c
AJ
4035 fpsrcop = CPU86_LDouble_to_double(ST1);
4036 fptemp = CPU86_LDouble_to_double(ST0);
4037 ST1 = double_to_CPU86_LDouble(atan2(fpsrcop, fptemp));
eaa728ee
FB
4038 fpop();
4039}
4040
4041void helper_fxtract(void)
4042{
4043 CPU86_LDoubleU temp;
eaa728ee
FB
4044
4045 temp.d = ST0;
c9ad19c5
AJ
4046
4047 if (floatx_is_zero(ST0)) {
4048        /* Easy way to generate -inf and raise the division-by-zero exception */
4049 ST0 = floatx_div(floatx_chs(floatx_one), floatx_zero, &env->fp_status);
4050 fpush();
4051 ST0 = temp.d;
4052 } else {
4053 int expdif;
4054
4055 expdif = EXPD(temp) - EXPBIAS;
4056 /*DP exponent bias*/
4057 ST0 = int32_to_floatx(expdif, &env->fp_status);
4058 fpush();
4059 BIASEXPONENT(temp);
4060 ST0 = temp.d;
4061 }
eaa728ee
FB
4062}
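
/* Illustrative sketch: for a finite nonzero input the split above is
   what frexp() does, up to the normalisation convention -- FXTRACT
   leaves a significand in [1.0, 2.0) while frexp() returns one in
   [0.5, 1.0): */
static void fxtract_like(double x, double *sig, int *exp)
{
    *sig = 2.0 * frexp(x, exp); /* x = m * 2^e with 0.5 <= |m| < 1.0 */
    (*exp)--;                   /* compensate for doubling m */
}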
4063
4064void helper_fprem1(void)
4065{
bcb5fec5 4066 double st0, st1, dblq, fpsrcop, fptemp;
eaa728ee
FB
4067 CPU86_LDoubleU fpsrcop1, fptemp1;
4068 int expdif;
4069 signed long long int q;
4070
bcb5fec5
AJ
4071 st0 = CPU86_LDouble_to_double(ST0);
4072 st1 = CPU86_LDouble_to_double(ST1);
4073
4074 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4075 ST0 = double_to_CPU86_LDouble(0.0 / 0.0); /* NaN */
eaa728ee
FB
4076 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4077 return;
4078 }
4079
bcb5fec5
AJ
4080 fpsrcop = st0;
4081 fptemp = st1;
4082 fpsrcop1.d = ST0;
4083 fptemp1.d = ST1;
eaa728ee
FB
4084 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4085
4086 if (expdif < 0) {
4087 /* optimisation? taken from the AMD docs */
4088 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4089 /* ST0 is unchanged */
4090 return;
4091 }
4092
4093 if (expdif < 53) {
4094 dblq = fpsrcop / fptemp;
4095 /* round dblq towards nearest integer */
4096 dblq = rint(dblq);
bcb5fec5 4097 st0 = fpsrcop - fptemp * dblq;
eaa728ee
FB
4098
4099 /* convert dblq to q by truncating towards zero */
4100 if (dblq < 0.0)
4101 q = (signed long long int)(-dblq);
4102 else
4103 q = (signed long long int)dblq;
4104
4105 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4106 /* (C0,C3,C1) <-- (q2,q1,q0) */
4107 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4108 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4109 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4110 } else {
4111 env->fpus |= 0x400; /* C2 <-- 1 */
4112 fptemp = pow(2.0, expdif - 50);
bcb5fec5 4113 fpsrcop = (st0 / st1) / fptemp;
eaa728ee
FB
4114 /* fpsrcop = integer obtained by chopping */
4115 fpsrcop = (fpsrcop < 0.0) ?
4116 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
bcb5fec5 4117 st0 -= (st1 * fpsrcop * fptemp);
eaa728ee 4118 }
bcb5fec5 4119 ST0 = double_to_CPU86_LDouble(st0);
eaa728ee
FB
4120}
4121
4122void helper_fprem(void)
4123{
bcb5fec5 4124 double st0, st1, dblq, fpsrcop, fptemp;
eaa728ee
FB
4125 CPU86_LDoubleU fpsrcop1, fptemp1;
4126 int expdif;
4127 signed long long int q;
4128
bcb5fec5
AJ
4129 st0 = CPU86_LDouble_to_double(ST0);
4130 st1 = CPU86_LDouble_to_double(ST1);
4131
4132 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4133 ST0 = double_to_CPU86_LDouble(0.0 / 0.0); /* NaN */
eaa728ee
FB
4134 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4135 return;
4136 }
4137
bcb5fec5
AJ
4138 fpsrcop = st0;
4139 fptemp = st1;
4140 fpsrcop1.d = ST0;
4141 fptemp1.d = ST1;
eaa728ee
FB
4142 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4143
4144 if (expdif < 0) {
4145 /* optimisation? taken from the AMD docs */
4146 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4147 /* ST0 is unchanged */
4148 return;
4149 }
4150
4151    if (expdif < 53) {
4152 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4153 /* round dblq towards zero */
4154 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
bcb5fec5 4155 st0 = fpsrcop/*ST0*/ - fptemp * dblq;
eaa728ee
FB
4156
4157 /* convert dblq to q by truncating towards zero */
4158 if (dblq < 0.0)
4159 q = (signed long long int)(-dblq);
4160 else
4161 q = (signed long long int)dblq;
4162
4163 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4164 /* (C0,C3,C1) <-- (q2,q1,q0) */
4165 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4166 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4167 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4168 } else {
4169 int N = 32 + (expdif % 32); /* as per AMD docs */
4170 env->fpus |= 0x400; /* C2 <-- 1 */
4171 fptemp = pow(2.0, (double)(expdif - N));
bcb5fec5 4172 fpsrcop = (st0 / st1) / fptemp;
eaa728ee
FB
4173 /* fpsrcop = integer obtained by chopping */
4174 fpsrcop = (fpsrcop < 0.0) ?
4175 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
bcb5fec5 4176 st0 -= (st1 * fpsrcop * fptemp);
eaa728ee 4177 }
bcb5fec5 4178 ST0 = double_to_CPU86_LDouble(st0);
eaa728ee
FB
4179}
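
/* Illustrative sketch: both helper_fprem1() and helper_fprem() above
   report the three low bits of the quotient in the status word as
   (C0,C3,C1) = (q2,q1,q0), i.e. FSW bits 8, 14 and 9.  The packing in
   isolation: */
static inline uint16_t fprem_quotient_bits(uint64_t q)
{
    uint16_t fpsw = 0;

    fpsw |= (q & 4) << (8 - 2);     /* q2 -> C0 (bit 8) */
    fpsw |= (q & 2) << (14 - 1);    /* q1 -> C3 (bit 14) */
    fpsw |= (q & 1) << (9 - 0);     /* q0 -> C1 (bit 9) */
    return fpsw;
}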
4180
4181void helper_fyl2xp1(void)
4182{
a2c9ed3c 4183 double fptemp = CPU86_LDouble_to_double(ST0);
eaa728ee 4184
eaa728ee
FB
4185 if ((fptemp+1.0)>0.0) {
4186 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
a2c9ed3c
AJ
4187 fptemp *= CPU86_LDouble_to_double(ST1);
4188 ST1 = double_to_CPU86_LDouble(fptemp);
eaa728ee
FB
4189 fpop();
4190 } else {
4191 env->fpus &= (~0x4700);
4192 env->fpus |= 0x400;
4193 }
4194}
4195
4196void helper_fsqrt(void)
4197{
fec05e42 4198 if (floatx_is_neg(ST0)) {
eaa728ee
FB
4199 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4200 env->fpus |= 0x400;
4201 }
fec05e42 4202 ST0 = floatx_sqrt(ST0, &env->fp_status);
eaa728ee
FB
4203}
4204
4205void helper_fsincos(void)
4206{
a2c9ed3c 4207 double fptemp = CPU86_LDouble_to_double(ST0);
eaa728ee 4208
eaa728ee
FB
4209 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4210 env->fpus |= 0x400;
4211 } else {
a2c9ed3c 4212 ST0 = double_to_CPU86_LDouble(sin(fptemp));
eaa728ee 4213 fpush();
a2c9ed3c 4214 ST0 = double_to_CPU86_LDouble(cos(fptemp));
eaa728ee
FB
4215 env->fpus &= (~0x400); /* C2 <-- 0 */
4216 /* the above code is for |arg| < 2**63 only */
4217 }
4218}
4219
4220void helper_frndint(void)
4221{
4222 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4223}
4224
4225void helper_fscale(void)
4226{
be1c17c7
AJ
4227 if (floatx_is_any_nan(ST1)) {
4228 ST0 = ST1;
4229 } else {
4230 int n = floatx_to_int32_round_to_zero(ST1, &env->fp_status);
4231 ST0 = floatx_scalbn(ST0, n, &env->fp_status);
4232 }
eaa728ee
FB
4233}
4234
4235void helper_fsin(void)
4236{
a2c9ed3c 4237 double fptemp = CPU86_LDouble_to_double(ST0);
eaa728ee 4238
eaa728ee
FB
4239 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4240 env->fpus |= 0x400;
4241 } else {
a2c9ed3c 4242 ST0 = double_to_CPU86_LDouble(sin(fptemp));
eaa728ee
FB
4243 env->fpus &= (~0x400); /* C2 <-- 0 */
4244 /* the above code is for |arg| < 2**53 only */
4245 }
4246}
4247
4248void helper_fcos(void)
4249{
a2c9ed3c 4250 double fptemp = CPU86_LDouble_to_double(ST0);
eaa728ee 4251
eaa728ee
FB
4252 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4253 env->fpus |= 0x400;
4254 } else {
a2c9ed3c 4255 ST0 = double_to_CPU86_LDouble(cos(fptemp));
eaa728ee
FB
4256 env->fpus &= (~0x400); /* C2 <-- 0 */
4257        /* the above code is for |arg| < 2**63 only */
4258 }
4259}
4260
4261void helper_fxam_ST0(void)
4262{
4263 CPU86_LDoubleU temp;
4264 int expdif;
4265
4266 temp.d = ST0;
4267
4268 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4269 if (SIGND(temp))
4270 env->fpus |= 0x200; /* C1 <-- 1 */
4271
4272 /* XXX: test fptags too */
4273 expdif = EXPD(temp);
4274 if (expdif == MAXEXPD) {
4275#ifdef USE_X86LDOUBLE
4276 if (MANTD(temp) == 0x8000000000000000ULL)
4277#else
4278 if (MANTD(temp) == 0)
4279#endif
4280 env->fpus |= 0x500 /*Infinity*/;
4281 else
4282 env->fpus |= 0x100 /*NaN*/;
4283 } else if (expdif == 0) {
4284 if (MANTD(temp) == 0)
4285 env->fpus |= 0x4000 /*Zero*/;
4286 else
4287 env->fpus |= 0x4400 /*Denormal*/;
4288 } else {
4289 env->fpus |= 0x400;
4290 }
4291}
4292
4293void helper_fstenv(target_ulong ptr, int data32)
4294{
4295 int fpus, fptag, exp, i;
4296 uint64_t mant;
4297 CPU86_LDoubleU tmp;
4298
4299 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4300 fptag = 0;
4301 for (i=7; i>=0; i--) {
4302 fptag <<= 2;
4303 if (env->fptags[i]) {
4304 fptag |= 3;
4305 } else {
4306 tmp.d = env->fpregs[i].d;
4307 exp = EXPD(tmp);
4308 mant = MANTD(tmp);
4309 if (exp == 0 && mant == 0) {
4310 /* zero */
4311 fptag |= 1;
4312 } else if (exp == 0 || exp == MAXEXPD
4313#ifdef USE_X86LDOUBLE
4314 || (mant & (1LL << 63)) == 0
4315#endif
4316 ) {
4317 /* NaNs, infinity, denormal */
4318 fptag |= 2;
4319 }
4320 }
4321 }
4322 if (data32) {
4323 /* 32 bit */
4324 stl(ptr, env->fpuc);
4325 stl(ptr + 4, fpus);
4326 stl(ptr + 8, fptag);
4327 stl(ptr + 12, 0); /* fpip */
4328 stl(ptr + 16, 0); /* fpcs */
4329 stl(ptr + 20, 0); /* fpoo */
4330 stl(ptr + 24, 0); /* fpos */
4331 } else {
4332 /* 16 bit */
4333 stw(ptr, env->fpuc);
4334 stw(ptr + 2, fpus);
4335 stw(ptr + 4, fptag);
4336 stw(ptr + 6, 0);
4337 stw(ptr + 8, 0);
4338 stw(ptr + 10, 0);
4339 stw(ptr + 12, 0);
4340 }
4341}
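
/* Illustrative sketch: the 16-bit tag word built above holds two bits
   per x87 register (00 valid, 01 zero, 10 NaN/infinity/denormal,
   11 empty), with register 0 in bits 1..0.  Extracting one register's
   tag: */
static inline int fptag_of_reg(uint32_t fptag, int reg)
{
    return (fptag >> (2 * reg)) & 3;
}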
4342
4343void helper_fldenv(target_ulong ptr, int data32)
4344{
4345 int i, fpus, fptag;
4346
4347 if (data32) {
4348 env->fpuc = lduw(ptr);
4349 fpus = lduw(ptr + 4);
4350 fptag = lduw(ptr + 8);
4351 }
4352 else {
4353 env->fpuc = lduw(ptr);
4354 fpus = lduw(ptr + 2);
4355 fptag = lduw(ptr + 4);
4356 }
4357 env->fpstt = (fpus >> 11) & 7;
4358 env->fpus = fpus & ~0x3800;
4359 for(i = 0;i < 8; i++) {
4360 env->fptags[i] = ((fptag & 3) == 3);
4361 fptag >>= 2;
4362 }
4363}
4364
4365void helper_fsave(target_ulong ptr, int data32)
4366{
4367 CPU86_LDouble tmp;
4368 int i;
4369
4370 helper_fstenv(ptr, data32);
4371
4372 ptr += (14 << data32);
4373 for(i = 0;i < 8; i++) {
4374 tmp = ST(i);
4375 helper_fstt(tmp, ptr);
4376 ptr += 10;
4377 }
4378
4379 /* fninit */
4380 env->fpus = 0;
4381 env->fpstt = 0;
4382 env->fpuc = 0x37f;
4383 env->fptags[0] = 1;
4384 env->fptags[1] = 1;
4385 env->fptags[2] = 1;
4386 env->fptags[3] = 1;
4387 env->fptags[4] = 1;
4388 env->fptags[5] = 1;
4389 env->fptags[6] = 1;
4390 env->fptags[7] = 1;
4391}
4392
4393void helper_frstor(target_ulong ptr, int data32)
4394{
4395 CPU86_LDouble tmp;
4396 int i;
4397
4398 helper_fldenv(ptr, data32);
4399 ptr += (14 << data32);
4400
4401 for(i = 0;i < 8; i++) {
4402 tmp = helper_fldt(ptr);
4403 ST(i) = tmp;
4404 ptr += 10;
4405 }
4406}
4407
4408void helper_fxsave(target_ulong ptr, int data64)
4409{
4410 int fpus, fptag, i, nb_xmm_regs;
4411 CPU86_LDouble tmp;
4412 target_ulong addr;
4413
09d85fb8
KW
4414    /* The operand must be 16-byte aligned */
4415 if (ptr & 0xf) {
4416 raise_exception(EXCP0D_GPF);
4417 }
4418
eaa728ee
FB
4419 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4420 fptag = 0;
4421 for(i = 0; i < 8; i++) {
4422 fptag |= (env->fptags[i] << i);
4423 }
4424 stw(ptr, env->fpuc);
4425 stw(ptr + 2, fpus);
4426 stw(ptr + 4, fptag ^ 0xff);
4427#ifdef TARGET_X86_64
4428 if (data64) {
4429 stq(ptr + 0x08, 0); /* rip */
4430 stq(ptr + 0x10, 0); /* rdp */
4431 } else
4432#endif
4433 {
4434 stl(ptr + 0x08, 0); /* eip */
4435 stl(ptr + 0x0c, 0); /* sel */
4436 stl(ptr + 0x10, 0); /* dp */
4437 stl(ptr + 0x14, 0); /* sel */
4438 }
4439
4440 addr = ptr + 0x20;
4441 for(i = 0;i < 8; i++) {
4442 tmp = ST(i);
4443 helper_fstt(tmp, addr);
4444 addr += 16;
4445 }
4446
4447 if (env->cr[4] & CR4_OSFXSR_MASK) {
4448 /* XXX: finish it */
4449 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4450 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4451 if (env->hflags & HF_CS64_MASK)
4452 nb_xmm_regs = 16;
4453 else
4454 nb_xmm_regs = 8;
4455 addr = ptr + 0xa0;
eef26553
AL
4456 /* Fast FXSAVE leaves out the XMM registers */
4457 if (!(env->efer & MSR_EFER_FFXSR)
4458 || (env->hflags & HF_CPL_MASK)
4459 || !(env->hflags & HF_LMA_MASK)) {
4460 for(i = 0; i < nb_xmm_regs; i++) {
4461 stq(addr, env->xmm_regs[i].XMM_Q(0));
4462 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4463 addr += 16;
4464 }
eaa728ee
FB
4465 }
4466 }
4467}
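
/* Illustrative sketch: unlike FSTENV, FXSAVE stores an abridged tag
   byte with a single bit per register, 1 = occupied.  env->fptags[]
   uses the opposite sense (1 = empty), hence the fptag ^ 0xff above: */
static inline uint8_t fxsave_abridged_tag(const uint8_t *fptags)
{
    uint8_t tag = 0;
    int i;

    for (i = 0; i < 8; i++) {
        tag |= fptags[i] << i;  /* 1 = empty, as in env->fptags[] */
    }
    return tag ^ 0xff;          /* FXSAVE wants 1 = occupied */
}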
4468
4469void helper_fxrstor(target_ulong ptr, int data64)
4470{
4471 int i, fpus, fptag, nb_xmm_regs;
4472 CPU86_LDouble tmp;
4473 target_ulong addr;
4474
09d85fb8
KW
4475    /* The operand must be 16-byte aligned */
4476 if (ptr & 0xf) {
4477 raise_exception(EXCP0D_GPF);
4478 }
4479
eaa728ee
FB
4480 env->fpuc = lduw(ptr);
4481 fpus = lduw(ptr + 2);
4482 fptag = lduw(ptr + 4);
4483 env->fpstt = (fpus >> 11) & 7;
4484 env->fpus = fpus & ~0x3800;
4485 fptag ^= 0xff;
4486 for(i = 0;i < 8; i++) {
4487 env->fptags[i] = ((fptag >> i) & 1);
4488 }
4489
4490 addr = ptr + 0x20;
4491 for(i = 0;i < 8; i++) {
4492 tmp = helper_fldt(addr);
4493 ST(i) = tmp;
4494 addr += 16;
4495 }
4496
4497 if (env->cr[4] & CR4_OSFXSR_MASK) {
4498 /* XXX: finish it */
4499 env->mxcsr = ldl(ptr + 0x18);
4500 //ldl(ptr + 0x1c);
4501 if (env->hflags & HF_CS64_MASK)
4502 nb_xmm_regs = 16;
4503 else
4504 nb_xmm_regs = 8;
4505 addr = ptr + 0xa0;
eef26553
AL
4506        /* Fast FXRSTOR leaves out the XMM registers */
4507 if (!(env->efer & MSR_EFER_FFXSR)
4508 || (env->hflags & HF_CPL_MASK)
4509 || !(env->hflags & HF_LMA_MASK)) {
4510 for(i = 0; i < nb_xmm_regs; i++) {
4511 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4512 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4513 addr += 16;
4514 }
eaa728ee
FB
4515 }
4516 }
4517}
4518
4519#ifndef USE_X86LDOUBLE
4520
4521void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4522{
4523 CPU86_LDoubleU temp;
4524 int e;
4525
4526 temp.d = f;
4527 /* mantissa */
4528 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4529 /* exponent + sign */
4530 e = EXPD(temp) - EXPBIAS + 16383;
4531 e |= SIGND(temp) >> 16;
4532 *pexp = e;
4533}
4534
4535CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4536{
4537 CPU86_LDoubleU temp;
4538 int e;
4539 uint64_t ll;
4540
4541 /* XXX: handle overflow ? */
4542 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4543 e |= (upper >> 4) & 0x800; /* sign */
4544 ll = (mant >> 11) & ((1LL << 52) - 1);
4545#ifdef __arm__
4546 temp.l.upper = (e << 20) | (ll >> 32);
4547 temp.l.lower = ll;
4548#else
4549 temp.ll = ll | ((uint64_t)e << 52);
4550#endif
4551 return temp.d;
4552}
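
/* Illustrative worked example for the pair above: converting 1.0,
   stored as the double 0x3ff0000000000000 (biased exponent 0x3ff,
   mantissa 0), yields mant = (0 << 11) | (1LL << 63) =
   0x8000000000000000 and exp = 0x3ff - EXPBIAS + 16383 = 0x3fff --
   the 80-bit format keeps an explicit integer bit and uses exponent
   bias 16383 instead of 1023. */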
4553
4554#else
4555
4556void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4557{
4558 CPU86_LDoubleU temp;
4559
4560 temp.d = f;
4561 *pmant = temp.l.lower;
4562 *pexp = temp.l.upper;
4563}
4564
4565CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4566{
4567 CPU86_LDoubleU temp;
4568
4569 temp.l.upper = upper;
4570 temp.l.lower = mant;
4571 return temp.d;
4572}
4573#endif
4574
4575#ifdef TARGET_X86_64
4576
4577//#define DEBUG_MULDIV
4578
4579static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4580{
4581 *plow += a;
4582 /* carry test */
4583 if (*plow < a)
4584 (*phigh)++;
4585 *phigh += b;
4586}
4587
4588static void neg128(uint64_t *plow, uint64_t *phigh)
4589{
4590 *plow = ~ *plow;
4591 *phigh = ~ *phigh;
4592 add128(plow, phigh, 1, 0);
4593}
4594
4595/* return TRUE if overflow */
4596static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4597{
4598 uint64_t q, r, a1, a0;
4599 int i, qb, ab;
4600
4601 a0 = *plow;
4602 a1 = *phigh;
4603 if (a1 == 0) {
4604 q = a0 / b;
4605 r = a0 % b;
4606 *plow = q;
4607 *phigh = r;
4608 } else {
4609 if (a1 >= b)
4610 return 1;
4611 /* XXX: use a better algorithm */
4612 for(i = 0; i < 64; i++) {
4613 ab = a1 >> 63;
4614 a1 = (a1 << 1) | (a0 >> 63);
4615 if (ab || a1 >= b) {
4616 a1 -= b;
4617 qb = 1;
4618 } else {
4619 qb = 0;
4620 }
4621 a0 = (a0 << 1) | qb;
4622 }
4623#if defined(DEBUG_MULDIV)
4624 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4625 *phigh, *plow, b, a0, a1);
4626#endif
4627 *plow = a0;
4628 *phigh = a1;
4629 }
4630 return 0;
4631}
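
/* Illustrative sketch of the contract: div64() divides the 128-bit
   value *phigh:*plow by b in place (quotient in *plow, remainder in
   *phigh) and returns nonzero when the quotient does not fit in 64
   bits -- the condition helper_divq_EAX() turns into #DE: */
static int div64_demo(void)
{
    uint64_t lo = 100, hi = 0;

    if (div64(&lo, &hi, 7)) {
        return -1;                   /* overflow */
    }
    return lo == 14 && hi == 2;      /* 100 = 7 * 14 + 2 */
}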
4632
4633/* return TRUE if overflow */
4634static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4635{
4636 int sa, sb;
4637 sa = ((int64_t)*phigh < 0);
4638 if (sa)
4639 neg128(plow, phigh);
4640 sb = (b < 0);
4641 if (sb)
4642 b = -b;
4643 if (div64(plow, phigh, b) != 0)
4644 return 1;
4645 if (sa ^ sb) {
4646 if (*plow > (1ULL << 63))
4647 return 1;
4648 *plow = - *plow;
4649 } else {
4650 if (*plow >= (1ULL << 63))
4651 return 1;
4652 }
4653 if (sa)
4654 *phigh = - *phigh;
4655 return 0;
4656}
4657
4658void helper_mulq_EAX_T0(target_ulong t0)
4659{
4660 uint64_t r0, r1;
4661
4662 mulu64(&r0, &r1, EAX, t0);
4663 EAX = r0;
4664 EDX = r1;
4665 CC_DST = r0;
4666 CC_SRC = r1;
4667}
4668
4669void helper_imulq_EAX_T0(target_ulong t0)
4670{
4671 uint64_t r0, r1;
4672
4673 muls64(&r0, &r1, EAX, t0);
4674 EAX = r0;
4675 EDX = r1;
4676 CC_DST = r0;
4677 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4678}
4679
4680target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4681{
4682 uint64_t r0, r1;
4683
4684 muls64(&r0, &r1, t0, t1);
4685 CC_DST = r0;
4686 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4687 return r0;
4688}
4689
4690void helper_divq_EAX(target_ulong t0)
4691{
4692 uint64_t r0, r1;
4693 if (t0 == 0) {
4694 raise_exception(EXCP00_DIVZ);
4695 }
4696 r0 = EAX;
4697 r1 = EDX;
4698 if (div64(&r0, &r1, t0))
4699 raise_exception(EXCP00_DIVZ);
4700 EAX = r0;
4701 EDX = r1;
4702}
4703
4704void helper_idivq_EAX(target_ulong t0)
4705{
4706 uint64_t r0, r1;
4707 if (t0 == 0) {
4708 raise_exception(EXCP00_DIVZ);
4709 }
4710 r0 = EAX;
4711 r1 = EDX;
4712 if (idiv64(&r0, &r1, t0))
4713 raise_exception(EXCP00_DIVZ);
4714 EAX = r0;
4715 EDX = r1;
4716}
4717#endif
4718
94451178 4719static void do_hlt(void)
eaa728ee
FB
4720{
4721 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
ce5232c5 4722 env->halted = 1;
eaa728ee
FB
4723 env->exception_index = EXCP_HLT;
4724 cpu_loop_exit();
4725}
4726
94451178
FB
4727void helper_hlt(int next_eip_addend)
4728{
4729 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4730 EIP += next_eip_addend;
4731
4732 do_hlt();
4733}
4734
eaa728ee
FB
4735void helper_monitor(target_ulong ptr)
4736{
4737 if ((uint32_t)ECX != 0)
4738 raise_exception(EXCP0D_GPF);
4739 /* XXX: store address ? */
872929aa 4740 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
eaa728ee
FB
4741}
4742
94451178 4743void helper_mwait(int next_eip_addend)
eaa728ee
FB
4744{
4745 if ((uint32_t)ECX != 0)
4746 raise_exception(EXCP0D_GPF);
872929aa 4747 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
94451178
FB
4748 EIP += next_eip_addend;
4749
eaa728ee
FB
4750 /* XXX: not complete but not completely erroneous */
4751 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4752 /* more than one CPU: do not sleep because another CPU may
4753 wake this one */
4754 } else {
94451178 4755 do_hlt();
eaa728ee
FB
4756 }
4757}
4758
4759void helper_debug(void)
4760{
4761 env->exception_index = EXCP_DEBUG;
4762 cpu_loop_exit();
4763}
4764
a2397807
JK
4765void helper_reset_rf(void)
4766{
4767 env->eflags &= ~RF_MASK;
4768}
4769
eaa728ee
FB
4770void helper_raise_interrupt(int intno, int next_eip_addend)
4771{
4772 raise_interrupt(intno, 1, 0, next_eip_addend);
4773}
4774
4775void helper_raise_exception(int exception_index)
4776{
4777 raise_exception(exception_index);
4778}
4779
4780void helper_cli(void)
4781{
4782 env->eflags &= ~IF_MASK;
4783}
4784
4785void helper_sti(void)
4786{
4787 env->eflags |= IF_MASK;
4788}
4789
4790#if 0
4791/* vm86plus instructions */
4792void helper_cli_vm(void)
4793{
4794 env->eflags &= ~VIF_MASK;
4795}
4796
4797void helper_sti_vm(void)
4798{
4799 env->eflags |= VIF_MASK;
4800 if (env->eflags & VIP_MASK) {
4801 raise_exception(EXCP0D_GPF);
4802 }
4803}
4804#endif
4805
4806void helper_set_inhibit_irq(void)
4807{
4808 env->hflags |= HF_INHIBIT_IRQ_MASK;
4809}
4810
4811void helper_reset_inhibit_irq(void)
4812{
4813 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4814}
4815
4816void helper_boundw(target_ulong a0, int v)
4817{
4818 int low, high;
4819 low = ldsw(a0);
4820 high = ldsw(a0 + 2);
4821 v = (int16_t)v;
4822 if (v < low || v > high) {
4823 raise_exception(EXCP05_BOUND);
4824 }
eaa728ee
FB
4825}
4826
4827void helper_boundl(target_ulong a0, int v)
4828{
4829 int low, high;
4830 low = ldl(a0);
4831 high = ldl(a0 + 4);
4832 if (v < low || v > high) {
4833 raise_exception(EXCP05_BOUND);
4834 }
eaa728ee
FB
4835}
4836
eaa728ee
FB
4837#if !defined(CONFIG_USER_ONLY)
4838
4839#define MMUSUFFIX _mmu
4840
4841#define SHIFT 0
4842#include "softmmu_template.h"
4843
4844#define SHIFT 1
4845#include "softmmu_template.h"
4846
4847#define SHIFT 2
4848#include "softmmu_template.h"
4849
4850#define SHIFT 3
4851#include "softmmu_template.h"
4852
4853#endif
4854
d9957a8b 4855#if !defined(CONFIG_USER_ONLY)
eaa728ee
FB
4856/* try to fill the TLB and raise an exception on error. If retaddr is
4857 NULL, it means that the function was called in C code (i.e. not
4858 from generated code or from helper.c) */
4859/* XXX: fix it to restore all registers */
4860void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4861{
4862 TranslationBlock *tb;
4863 int ret;
4864 unsigned long pc;
4865 CPUX86State *saved_env;
4866
4867 /* XXX: hack to restore env in all cases, even if not called from
4868 generated code */
4869 saved_env = env;
4870 env = cpu_single_env;
4871
4872 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4873 if (ret) {
4874 if (retaddr) {
4875 /* now we have a real cpu fault */
4876 pc = (unsigned long)retaddr;
4877 tb = tb_find_pc(pc);
4878 if (tb) {
4879 /* the PC is inside the translated code. It means that we have
4880 a virtual CPU fault */
618ba8e6 4881 cpu_restore_state(tb, env, pc);
eaa728ee
FB
4882 }
4883 }
872929aa 4884 raise_exception_err(env->exception_index, env->error_code);
eaa728ee
FB
4885 }
4886 env = saved_env;
4887}
d9957a8b 4888#endif
eaa728ee
FB
4889
4890/* Secure Virtual Machine helpers */
4891
eaa728ee
FB
4892#if defined(CONFIG_USER_ONLY)
4893
db620f46 4894void helper_vmrun(int aflag, int next_eip_addend)
eaa728ee
FB
4895{
4896}
4897void helper_vmmcall(void)
4898{
4899}
914178d3 4900void helper_vmload(int aflag)
eaa728ee
FB
4901{
4902}
914178d3 4903void helper_vmsave(int aflag)
eaa728ee
FB
4904{
4905}
872929aa
FB
4906void helper_stgi(void)
4907{
4908}
4909void helper_clgi(void)
4910{
4911}
eaa728ee
FB
4912void helper_skinit(void)
4913{
4914}
914178d3 4915void helper_invlpga(int aflag)
eaa728ee
FB
4916{
4917}
4918void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4919{
4920}
4921void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4922{
4923}
4924
4925void helper_svm_check_io(uint32_t port, uint32_t param,
4926 uint32_t next_eip_addend)
4927{
4928}
4929#else
4930
c227f099 4931static inline void svm_save_seg(target_phys_addr_t addr,
872929aa 4932 const SegmentCache *sc)
eaa728ee 4933{
872929aa
FB
4934 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4935 sc->selector);
4936 stq_phys(addr + offsetof(struct vmcb_seg, base),
4937 sc->base);
4938 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4939 sc->limit);
4940 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
e72210e1 4941 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
872929aa
FB
4942}
4943
c227f099 4944static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
872929aa
FB
4945{
4946 unsigned int flags;
4947
4948 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4949 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4950 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4951 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4952 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
eaa728ee
FB
4953}
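
/* Illustrative sketch: the VMCB packs segment attributes into 12 bits
   (descriptor byte 5 in bits 7..0, the G/D/L/AVL nibble in bits 11..8),
   while sc->flags mirrors the raw descriptor layout (bits 8..15 and
   20..23).  The load conversion above, in isolation, is the exact
   inverse of the save conversion: */
static inline unsigned int vmcb_attrib_to_flags(unsigned int attrib)
{
    return ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);
}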
4954
c227f099 4955static inline void svm_load_seg_cache(target_phys_addr_t addr,
872929aa 4956 CPUState *env, int seg_reg)
eaa728ee 4957{
872929aa
FB
4958 SegmentCache sc1, *sc = &sc1;
4959 svm_load_seg(addr, sc);
4960 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4961 sc->base, sc->limit, sc->flags);
eaa728ee
FB
4962}
4963
db620f46 4964void helper_vmrun(int aflag, int next_eip_addend)
eaa728ee
FB
4965{
4966 target_ulong addr;
4967 uint32_t event_inj;
4968 uint32_t int_ctl;
4969
872929aa
FB
4970 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4971
914178d3
FB
4972 if (aflag == 2)
4973 addr = EAX;
4974 else
4975 addr = (uint32_t)EAX;
4976
93fcfe39 4977 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
eaa728ee
FB
4978
4979 env->vm_vmcb = addr;
4980
4981 /* save the current CPU state in the hsave page */
4982 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4983 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4984
4985 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4986 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4987
4988 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4989 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4990 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4991 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
eaa728ee
FB
4992 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4993 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4994
4995 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4996 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4997
872929aa
FB
4998 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4999 &env->segs[R_ES]);
5000 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
5001 &env->segs[R_CS]);
5002 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
5003 &env->segs[R_SS]);
5004 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
5005 &env->segs[R_DS]);
eaa728ee 5006
db620f46
FB
5007 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
5008 EIP + next_eip_addend);
eaa728ee
FB
5009 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
5010 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
5011
5012 /* load the interception bitmaps so we do not need to access the
5013 vmcb in svm mode */
872929aa 5014 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
eaa728ee
FB
5015 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
5016 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
5017 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
5018 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
5019 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
5020
872929aa
FB
5021 /* enable intercepts */
5022 env->hflags |= HF_SVMI_MASK;
5023
33c263df
FB
5024 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
5025
eaa728ee
FB
5026 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
5027 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
5028
5029 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5030 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5031
5032 /* clear exit_info_2 so we behave like the real hardware */
5033 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5034
5035 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5036 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5037 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5038 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5039 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
db620f46 5040 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
eaa728ee 5041 if (int_ctl & V_INTR_MASKING_MASK) {
db620f46
FB
5042 env->v_tpr = int_ctl & V_TPR_MASK;
5043 env->hflags2 |= HF2_VINTR_MASK;
eaa728ee 5044 if (env->eflags & IF_MASK)
db620f46 5045 env->hflags2 |= HF2_HIF_MASK;
eaa728ee
FB
5046 }
5047
5efc27bb
FB
5048 cpu_load_efer(env,
5049 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
eaa728ee
FB
5050 env->eflags = 0;
5051 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5052 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5053 CC_OP = CC_OP_EFLAGS;
eaa728ee 5054
872929aa
FB
5055 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5056 env, R_ES);
5057 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5058 env, R_CS);
5059 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5060 env, R_SS);
5061 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5062 env, R_DS);
eaa728ee
FB
5063
5064 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5065 env->eip = EIP;
5066 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5067 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5068 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5069 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5070 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5071
5072 /* FIXME: guest state consistency checks */
5073
5074 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5075 case TLB_CONTROL_DO_NOTHING:
5076 break;
5077 case TLB_CONTROL_FLUSH_ALL_ASID:
5078 /* FIXME: this is not 100% correct but should work for now */
5079 tlb_flush(env, 1);
5080 break;
5081 }
5082
960540b4 5083 env->hflags2 |= HF2_GIF_MASK;
eaa728ee 5084
db620f46
FB
5085 if (int_ctl & V_IRQ_MASK) {
5086 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5087 }
5088
eaa728ee
FB
5089 /* maybe we need to inject an event */
5090 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5091 if (event_inj & SVM_EVTINJ_VALID) {
5092 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5093 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5094 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
eaa728ee 5095
93fcfe39 5096 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
eaa728ee
FB
5097 /* FIXME: need to implement valid_err */
5098 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5099 case SVM_EVTINJ_TYPE_INTR:
5100 env->exception_index = vector;
5101 env->error_code = event_inj_err;
5102 env->exception_is_int = 0;
5103 env->exception_next_eip = -1;
93fcfe39 5104 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
db620f46
FB
5105 /* XXX: is it always correct ? */
5106 do_interrupt(vector, 0, 0, 0, 1);
eaa728ee
FB
5107 break;
5108 case SVM_EVTINJ_TYPE_NMI:
db620f46 5109 env->exception_index = EXCP02_NMI;
eaa728ee
FB
5110 env->error_code = event_inj_err;
5111 env->exception_is_int = 0;
5112 env->exception_next_eip = EIP;
93fcfe39 5113 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
db620f46 5114 cpu_loop_exit();
eaa728ee
FB
5115 break;
5116 case SVM_EVTINJ_TYPE_EXEPT:
5117 env->exception_index = vector;
5118 env->error_code = event_inj_err;
5119 env->exception_is_int = 0;
5120 env->exception_next_eip = -1;
93fcfe39 5121 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
db620f46 5122 cpu_loop_exit();
eaa728ee
FB
5123 break;
5124 case SVM_EVTINJ_TYPE_SOFT:
5125 env->exception_index = vector;
5126 env->error_code = event_inj_err;
5127 env->exception_is_int = 1;
5128 env->exception_next_eip = EIP;
93fcfe39 5129 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
db620f46 5130 cpu_loop_exit();
eaa728ee
FB
5131 break;
5132 }
93fcfe39 5133 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
eaa728ee 5134 }
eaa728ee
FB
5135}
5136
5137void helper_vmmcall(void)
5138{
872929aa
FB
5139 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5140 raise_exception(EXCP06_ILLOP);
eaa728ee
FB
5141}
5142
914178d3 5143void helper_vmload(int aflag)
eaa728ee
FB
5144{
5145 target_ulong addr;
872929aa
FB
5146 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5147
914178d3
FB
5148 if (aflag == 2)
5149 addr = EAX;
5150 else
5151 addr = (uint32_t)EAX;
5152
93fcfe39 5153 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
eaa728ee
FB
5154 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5155 env->segs[R_FS].base);
5156
872929aa
FB
5157 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5158 env, R_FS);
5159 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5160 env, R_GS);
5161 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5162 &env->tr);
5163 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5164 &env->ldt);
eaa728ee
FB
5165
5166#ifdef TARGET_X86_64
5167 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5168 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5169 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5170 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5171#endif
5172 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5173 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5174 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5175 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5176}
5177
914178d3 5178void helper_vmsave(int aflag)
eaa728ee
FB
5179{
5180 target_ulong addr;
872929aa 5181 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
914178d3
FB
5182
5183 if (aflag == 2)
5184 addr = EAX;
5185 else
5186 addr = (uint32_t)EAX;
5187
93fcfe39 5188 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
eaa728ee
FB
5189 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5190 env->segs[R_FS].base);
5191
872929aa
FB
5192 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5193 &env->segs[R_FS]);
5194 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5195 &env->segs[R_GS]);
5196 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5197 &env->tr);
5198 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5199 &env->ldt);
eaa728ee
FB
5200
5201#ifdef TARGET_X86_64
5202 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5203 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5204 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5205 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5206#endif
5207 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5208 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5209 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5210 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5211}
5212
872929aa
FB
5213void helper_stgi(void)
5214{
5215 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
db620f46 5216 env->hflags2 |= HF2_GIF_MASK;
872929aa
FB
5217}
5218
5219void helper_clgi(void)
5220{
5221 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
db620f46 5222 env->hflags2 &= ~HF2_GIF_MASK;
872929aa
FB
5223}
5224
eaa728ee
FB
5225void helper_skinit(void)
5226{
872929aa
FB
5227 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5228 /* XXX: not implemented */
872929aa 5229 raise_exception(EXCP06_ILLOP);
eaa728ee
FB
5230}
5231
914178d3 5232void helper_invlpga(int aflag)
eaa728ee 5233{
914178d3 5234 target_ulong addr;
872929aa 5235 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
914178d3
FB
5236
5237 if (aflag == 2)
5238 addr = EAX;
5239 else
5240 addr = (uint32_t)EAX;
5241
5242    /* XXX: could use the ASID to decide whether the flush
5243       is needed at all */
5244 tlb_flush_page(env, addr);
eaa728ee
FB
5245}
5246
5247void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5248{
872929aa
FB
5249 if (likely(!(env->hflags & HF_SVMI_MASK)))
5250 return;
eaa728ee
FB
5251 switch(type) {
5252 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
872929aa 5253 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
eaa728ee
FB
5254 helper_vmexit(type, param);
5255 }
5256 break;
872929aa
FB
5257 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5258 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
eaa728ee
FB
5259 helper_vmexit(type, param);
5260 }
5261 break;
872929aa
FB
5262 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5263 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
eaa728ee
FB
5264 helper_vmexit(type, param);
5265 }
5266 break;
872929aa
FB
5267 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5268 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
eaa728ee
FB
5269 helper_vmexit(type, param);
5270 }
5271 break;
872929aa
FB
5272 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5273 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
eaa728ee
FB
5274 helper_vmexit(type, param);
5275 }
5276 break;
eaa728ee 5277 case SVM_EXIT_MSR:
872929aa 5278 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
eaa728ee
FB
5279 /* FIXME: this should be read in at vmrun (faster this way?) */
5280 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5281 uint32_t t0, t1;
5282 switch((uint32_t)ECX) {
5283 case 0 ... 0x1fff:
5284 t0 = (ECX * 2) % 8;
583cd3cb 5285 t1 = (ECX * 2) / 8;
eaa728ee
FB
5286 break;
5287 case 0xc0000000 ... 0xc0001fff:
5288 t0 = (8192 + ECX - 0xc0000000) * 2;
5289 t1 = (t0 / 8);
5290 t0 %= 8;
5291 break;
5292 case 0xc0010000 ... 0xc0011fff:
5293 t0 = (16384 + ECX - 0xc0010000) * 2;
5294 t1 = (t0 / 8);
5295 t0 %= 8;
5296 break;
5297 default:
5298 helper_vmexit(type, param);
5299 t0 = 0;
5300 t1 = 0;
5301 break;
5302 }
5303 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5304 helper_vmexit(type, param);
5305 }
5306 break;
5307 default:
872929aa 5308 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
eaa728ee
FB
5309 helper_vmexit(type, param);
5310 }
5311 break;
5312 }
5313}
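
/* Illustrative worked example for the MSR permission map lookup above:
   the map holds two bits per MSR (read intercept, then write
   intercept).  For ECX = 0x10 in the low range the bit offset is
   0x10 * 2 = 32, so t1 = 4 (byte) and t0 = 0 (bit); a write access
   (param = 1) then tests bit 1 of that byte via (1 << param) << t0. */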
5314
5315void helper_svm_check_io(uint32_t port, uint32_t param,
5316 uint32_t next_eip_addend)
5317{
872929aa 5318 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
eaa728ee
FB
5319 /* FIXME: this should be read in at vmrun (faster this way?) */
5320 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5321 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5322 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5323 /* next EIP */
5324 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5325 env->eip + next_eip_addend);
5326 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5327 }
5328 }
5329}
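
/* Illustrative worked example for the IOPM lookup above: the map holds
   one bit per I/O port, and bits 6..4 of param carry the SVM size code
   (1/2/4 for 8/16/32-bit accesses), which doubles as the access width
   in bytes.  A 16-bit access to port 0x3f9 therefore loads the word at
   addr + 0x3f9 / 8 and tests mask 0x3 shifted by (0x3f9 & 7) = 1. */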
5330
5331/* Note: currently only 32 bits of exit_code are used */
5332void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5333{
5334 uint32_t int_ctl;
5335
93fcfe39 5336 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
eaa728ee
FB
5337 exit_code, exit_info_1,
5338 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5339 EIP);
5340
5341 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5342 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5343 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5344 } else {
5345 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5346 }
5347
5348 /* Save the VM state in the vmcb */
872929aa
FB
5349 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5350 &env->segs[R_ES]);
5351 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5352 &env->segs[R_CS]);
5353 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5354 &env->segs[R_SS]);
5355 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5356 &env->segs[R_DS]);
eaa728ee
FB
5357
5358 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5359 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5360
5361 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5362 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5363
5364 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5365 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5366 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5367 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5368 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5369
db620f46
FB
5370 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5371 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5372 int_ctl |= env->v_tpr & V_TPR_MASK;
5373 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5374 int_ctl |= V_IRQ_MASK;
5375 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
eaa728ee
FB
5376
5377 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5378 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5379 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5380 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5381 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5382 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5383 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5384
5385 /* Reload the host state from vm_hsave */
db620f46 5386 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
872929aa 5387 env->hflags &= ~HF_SVMI_MASK;
eaa728ee
FB
5388 env->intercept = 0;
5389 env->intercept_exceptions = 0;
5390 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
33c263df 5391 env->tsc_offset = 0;
eaa728ee
FB
5392
5393 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5394 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5395
5396 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5397 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5398
5399 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5400 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5401 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5efc27bb
FB
5402 /* we need to set the efer after the crs so the hidden flags get
5403 set properly */
5efc27bb
FB
5404 cpu_load_efer(env,
5405 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
eaa728ee
FB
5406 env->eflags = 0;
5407 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5408 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5409 CC_OP = CC_OP_EFLAGS;
5410
872929aa
FB
5411 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5412 env, R_ES);
5413 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5414 env, R_CS);
5415 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5416 env, R_SS);
5417 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5418 env, R_DS);
eaa728ee
FB
5419
5420 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5421 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5422 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5423
5424 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5425 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5426
5427 /* other setups */
5428 cpu_x86_set_cpl(env, 0);
5429 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5430 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5431
2ed51f5b
AL
5432 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5433 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5434 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5435 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
ab5ea558 5436 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
2ed51f5b 5437
960540b4 5438 env->hflags2 &= ~HF2_GIF_MASK;
eaa728ee
FB
5439 /* FIXME: Resets the current ASID register to zero (host ASID). */
5440
5441 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5442
5443 /* Clears the TSC_OFFSET inside the processor. */
5444
5445 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5446 from the page table indicated the host's CR3. If the PDPEs contain
5447 illegal state, the processor causes a shutdown. */
5448
5449 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5450 env->cr[0] |= CR0_PE_MASK;
5451 env->eflags &= ~VM_MASK;
5452
5453 /* Disables all breakpoints in the host DR7 register. */
5454
5455 /* Checks the reloaded host state for consistency. */
5456
5457 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5458 host's code segment or non-canonical (in the case of long mode), a
5459       #GP fault is delivered inside the host. */
5460
5461 /* remove any pending exception */
5462 env->exception_index = -1;
5463 env->error_code = 0;
5464 env->old_exception = -1;
5465
5466 cpu_loop_exit();
5467}
5468
5469#endif
5470
5471/* MMX/SSE */
5472/* XXX: optimize by storing fptt and fptags in the static cpu state */
5473void helper_enter_mmx(void)
5474{
5475 env->fpstt = 0;
5476 *(uint32_t *)(env->fptags) = 0;
5477 *(uint32_t *)(env->fptags + 4) = 0;
5478}
5479
5480void helper_emms(void)
5481{
5482 /* set to empty state */
5483 *(uint32_t *)(env->fptags) = 0x01010101;
5484 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5485}
5486
5487/* XXX: suppress */
a7812ae4 5488void helper_movq(void *d, void *s)
eaa728ee 5489{
a7812ae4 5490 *(uint64_t *)d = *(uint64_t *)s;
eaa728ee
FB
5491}
5492
5493#define SHIFT 0
5494#include "ops_sse.h"
5495
5496#define SHIFT 1
5497#include "ops_sse.h"
5498
5499#define SHIFT 0
5500#include "helper_template.h"
5501#undef SHIFT
5502
5503#define SHIFT 1
5504#include "helper_template.h"
5505#undef SHIFT
5506
5507#define SHIFT 2
5508#include "helper_template.h"
5509#undef SHIFT
5510
5511#ifdef TARGET_X86_64
5512
5513#define SHIFT 3
5514#include "helper_template.h"
5515#undef SHIFT
5516
5517#endif
5518
5519/* bit operations */
5520target_ulong helper_bsf(target_ulong t0)
5521{
5522 int count;
5523 target_ulong res;
5524
5525 res = t0;
5526 count = 0;
5527 while ((res & 1) == 0) {
5528 count++;
5529 res >>= 1;
5530 }
5531 return count;
5532}
5533
31501a71 5534target_ulong helper_lzcnt(target_ulong t0, int wordsize)
eaa728ee
FB
5535{
5536 int count;
5537 target_ulong res, mask;
31501a71
AP
5538
5539 if (wordsize > 0 && t0 == 0) {
5540 return wordsize;
5541 }
eaa728ee
FB
5542 res = t0;
5543 count = TARGET_LONG_BITS - 1;
5544 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5545 while ((res & mask) == 0) {
5546 count--;
5547 res <<= 1;
5548 }
31501a71
AP
5549 if (wordsize > 0) {
5550 return wordsize - 1 - count;
5551 }
eaa728ee
FB
5552 return count;
5553}
5554
31501a71
AP
5555target_ulong helper_bsr(target_ulong t0)
5556{
5557 return helper_lzcnt(t0, 0);
5558}
eaa728ee
FB
5559
5560static int compute_all_eflags(void)
5561{
5562 return CC_SRC;
5563}
5564
5565static int compute_c_eflags(void)
5566{
5567 return CC_SRC & CC_C;
5568}
5569
a7812ae4
PB
5570uint32_t helper_cc_compute_all(int op)
5571{
5572 switch (op) {
5573 default: /* should never happen */ return 0;
eaa728ee 5574
a7812ae4 5575 case CC_OP_EFLAGS: return compute_all_eflags();
eaa728ee 5576
a7812ae4
PB
5577 case CC_OP_MULB: return compute_all_mulb();
5578 case CC_OP_MULW: return compute_all_mulw();
5579 case CC_OP_MULL: return compute_all_mull();
eaa728ee 5580
a7812ae4
PB
5581 case CC_OP_ADDB: return compute_all_addb();
5582 case CC_OP_ADDW: return compute_all_addw();
5583 case CC_OP_ADDL: return compute_all_addl();
eaa728ee 5584
a7812ae4
PB
5585 case CC_OP_ADCB: return compute_all_adcb();
5586 case CC_OP_ADCW: return compute_all_adcw();
5587 case CC_OP_ADCL: return compute_all_adcl();
eaa728ee 5588
a7812ae4
PB
5589 case CC_OP_SUBB: return compute_all_subb();
5590 case CC_OP_SUBW: return compute_all_subw();
5591 case CC_OP_SUBL: return compute_all_subl();
eaa728ee 5592
a7812ae4
PB
5593 case CC_OP_SBBB: return compute_all_sbbb();
5594 case CC_OP_SBBW: return compute_all_sbbw();
5595 case CC_OP_SBBL: return compute_all_sbbl();
eaa728ee 5596
a7812ae4
PB
5597 case CC_OP_LOGICB: return compute_all_logicb();
5598 case CC_OP_LOGICW: return compute_all_logicw();
5599 case CC_OP_LOGICL: return compute_all_logicl();
eaa728ee 5600
a7812ae4
PB
5601 case CC_OP_INCB: return compute_all_incb();
5602 case CC_OP_INCW: return compute_all_incw();
5603 case CC_OP_INCL: return compute_all_incl();
eaa728ee 5604
a7812ae4
PB
5605 case CC_OP_DECB: return compute_all_decb();
5606 case CC_OP_DECW: return compute_all_decw();
5607 case CC_OP_DECL: return compute_all_decl();
eaa728ee 5608
a7812ae4
PB
5609 case CC_OP_SHLB: return compute_all_shlb();
5610 case CC_OP_SHLW: return compute_all_shlw();
5611 case CC_OP_SHLL: return compute_all_shll();
eaa728ee 5612
a7812ae4
PB
5613 case CC_OP_SARB: return compute_all_sarb();
5614 case CC_OP_SARW: return compute_all_sarw();
5615 case CC_OP_SARL: return compute_all_sarl();
eaa728ee
FB
5616
5617#ifdef TARGET_X86_64
a7812ae4 5618 case CC_OP_MULQ: return compute_all_mulq();
eaa728ee 5619
a7812ae4 5620 case CC_OP_ADDQ: return compute_all_addq();
eaa728ee 5621
a7812ae4 5622 case CC_OP_ADCQ: return compute_all_adcq();
eaa728ee 5623
a7812ae4 5624 case CC_OP_SUBQ: return compute_all_subq();
eaa728ee 5625
a7812ae4 5626 case CC_OP_SBBQ: return compute_all_sbbq();
eaa728ee 5627
a7812ae4 5628 case CC_OP_LOGICQ: return compute_all_logicq();
eaa728ee 5629
a7812ae4 5630 case CC_OP_INCQ: return compute_all_incq();
eaa728ee 5631
a7812ae4 5632 case CC_OP_DECQ: return compute_all_decq();
eaa728ee 5633
a7812ae4 5634 case CC_OP_SHLQ: return compute_all_shlq();
eaa728ee 5635
a7812ae4 5636 case CC_OP_SARQ: return compute_all_sarq();
eaa728ee 5637#endif
a7812ae4
PB
5638 }
5639}
5640
5641uint32_t helper_cc_compute_c(int op)
5642{
5643 switch (op) {
5644 default: /* should never happen */ return 0;
5645
5646 case CC_OP_EFLAGS: return compute_c_eflags();
5647
5648 case CC_OP_MULB: return compute_c_mull();
5649 case CC_OP_MULW: return compute_c_mull();
5650 case CC_OP_MULL: return compute_c_mull();
5651
5652 case CC_OP_ADDB: return compute_c_addb();
5653 case CC_OP_ADDW: return compute_c_addw();
5654 case CC_OP_ADDL: return compute_c_addl();
5655
5656 case CC_OP_ADCB: return compute_c_adcb();
5657 case CC_OP_ADCW: return compute_c_adcw();
5658 case CC_OP_ADCL: return compute_c_adcl();
5659
5660 case CC_OP_SUBB: return compute_c_subb();
5661 case CC_OP_SUBW: return compute_c_subw();
5662 case CC_OP_SUBL: return compute_c_subl();
5663
5664 case CC_OP_SBBB: return compute_c_sbbb();
5665 case CC_OP_SBBW: return compute_c_sbbw();
5666 case CC_OP_SBBL: return compute_c_sbbl();
5667
5668 case CC_OP_LOGICB: return compute_c_logicb();
5669 case CC_OP_LOGICW: return compute_c_logicw();
5670 case CC_OP_LOGICL: return compute_c_logicl();
5671
5672 case CC_OP_INCB: return compute_c_incl();
5673 case CC_OP_INCW: return compute_c_incl();
5674 case CC_OP_INCL: return compute_c_incl();
5675
5676 case CC_OP_DECB: return compute_c_incl();
5677 case CC_OP_DECW: return compute_c_incl();
5678 case CC_OP_DECL: return compute_c_incl();
eaa728ee 5679
a7812ae4
PB
5680 case CC_OP_SHLB: return compute_c_shlb();
5681 case CC_OP_SHLW: return compute_c_shlw();
5682 case CC_OP_SHLL: return compute_c_shll();
5683
5684 case CC_OP_SARB: return compute_c_sarl();
5685 case CC_OP_SARW: return compute_c_sarl();
5686 case CC_OP_SARL: return compute_c_sarl();
5687
5688#ifdef TARGET_X86_64
5689 case CC_OP_MULQ: return compute_c_mull();
5690
5691 case CC_OP_ADDQ: return compute_c_addq();
5692
5693 case CC_OP_ADCQ: return compute_c_adcq();
5694
5695 case CC_OP_SUBQ: return compute_c_subq();
5696
5697 case CC_OP_SBBQ: return compute_c_sbbq();
5698
5699 case CC_OP_LOGICQ: return compute_c_logicq();
5700
5701 case CC_OP_INCQ: return compute_c_incl();
5702
5703 case CC_OP_DECQ: return compute_c_incl();
5704
5705 case CC_OP_SHLQ: return compute_c_shlq();
5706
5707 case CC_OP_SARQ: return compute_c_sarl();
5708#endif
5709 }
5710}