/*
 * i386 helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#define CPU_NO_GLOBAL_REGS
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"

//#define DEBUG_PCALL


#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif


#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

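/* Table of the parity flag for each 8-bit value: x86 PF is set when
   the low byte of a result contains an even number of set bits. */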
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

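/* RCL/RCR rotate through CF, so a 16-bit rotate effectively works on
   17 bits and an 8-bit rotate on 9; the tables below reduce the
   rotation count modulo 17 and modulo 9 respectively. */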
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

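/* x87 constants in the order of the load-constant instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T); presumably
   indexed by the FPU helpers elsewhere in this file. */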
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

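/* Note: the arithmetic flags are computed lazily. CC_OP records the
   last operation and helper_cc_compute_all() rebuilds CF/PF/AF/ZF/SF/OF
   from CC_SRC/CC_DST on demand; DF and the system flags are kept in
   env->eflags itself. */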
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

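/* A 32-bit TSS keeps the ring-n stack pointers at offset 4 + 8 * n
   (ESPn then SSn), a 16-bit TSS at offset 2 + 4 * n, which is what
   the (dpl * 4 + 2) << shift computation below selects. */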
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is this correct? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* code must be readable */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

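/* 32-bit TSS offsets used below: 0x1c CR3, 0x20 EIP, 0x24 EFLAGS,
   0x28 general registers (EAX..EDI), 0x48 segment selectors (ES..GS),
   0x60 LDT selector, 0x64 T flag / I/O map base. The 16-bit TSS uses
   the corresponding 2-byte fields starting at 0x0e. */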
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

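    /* The busy flag is bit 1 of the TSS descriptor type, i.e. bit 9 of
       e2 (DESC_TSS_BUSY_MASK): type 9 is an available 32-bit TSS,
       type 11 a busy one. */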
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in the 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* load the selectors first, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

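/* The I/O permission bitmap starts at the 16-bit offset stored at
   byte 0x66 of the TSS; each port maps to one bit and an access of
   `size' bytes is allowed only if all of its bits are clear. Two
   bytes are read so that a check crossing a byte boundary works. */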
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

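/* Exceptions that push an error code: #DF(8), #TS(10), #NP(11),
   #SS(12), #GP(13), #PF(14) and #AC(17). */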
static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

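/* Update ESP while touching only the bits selected by sp_mask, so a
   16-bit stack leaves the high ESP bits intact. On x86_64 the
   0xffffffff case is special-cased because a 32-bit stack update
   zero-extends into RSP, as on real hardware. */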
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* On 64-bit machines this addition can overflow, so this segment-addition
 * macro can be used to trim the value to 32 bits whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

/* protected mode interrupt */
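/* Stack frame built below, in push order: on a privilege change
   SS:ESP (preceded by GS/FS/DS/ES when leaving vm86), then always
   EFLAGS, CS, EIP, and the error code when the exception defines one. */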
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

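/* The 64-bit TSS holds RSP0-RSP2 at offsets 4 + 8 * n and IST1-IST7
   from offset 0x24; get_rsp_from_tss() therefore maps ISTn to
   level n + 3. */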
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

881static void do_interrupt64(int intno, int is_int, int error_code,
882 target_ulong next_eip, int is_hw)
883{
884 SegmentCache *dt;
885 target_ulong ptr;
886 int type, dpl, selector, cpl, ist;
887 int has_error_code, new_stack;
888 uint32_t e1, e2, e3, ss;
889 target_ulong old_eip, esp, offset;
eaa728ee 890
eaa728ee 891 has_error_code = 0;
2ed51f5b
AL
892 if (!is_int && !is_hw)
893 has_error_code = exeption_has_error_code(intno);
eaa728ee
FB
894 if (is_int)
895 old_eip = next_eip;
896 else
897 old_eip = env->eip;
898
899 dt = &env->idt;
900 if (intno * 16 + 15 > dt->limit)
901 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
902 ptr = dt->base + intno * 16;
903 e1 = ldl_kernel(ptr);
904 e2 = ldl_kernel(ptr + 4);
905 e3 = ldl_kernel(ptr + 8);
906 /* check gate type */
907 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
908 switch(type) {
909 case 14: /* 386 interrupt gate */
910 case 15: /* 386 trap gate */
911 break;
912 default:
913 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
914 break;
915 }
916 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
917 cpl = env->hflags & HF_CPL_MASK;
1235fc06 918 /* check privilege if software int */
eaa728ee
FB
919 if (is_int && dpl < cpl)
920 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
921 /* check valid bit */
922 if (!(e2 & DESC_P_MASK))
923 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
924 selector = e1 >> 16;
925 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
926 ist = e2 & 7;
927 if ((selector & 0xfffc) == 0)
928 raise_exception_err(EXCP0D_GPF, 0);
929
930 if (load_segment(&e1, &e2, selector) != 0)
931 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
932 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
933 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
934 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
935 if (dpl > cpl)
936 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
937 if (!(e2 & DESC_P_MASK))
938 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
939 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
940 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
941 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
942 /* to inner privilege */
943 if (ist != 0)
944 esp = get_rsp_from_tss(ist + 3);
945 else
946 esp = get_rsp_from_tss(dpl);
947 esp &= ~0xfLL; /* align stack */
948 ss = 0;
949 new_stack = 1;
950 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
951 /* to same privilege */
952 if (env->eflags & VM_MASK)
953 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
954 new_stack = 0;
955 if (ist != 0)
956 esp = get_rsp_from_tss(ist + 3);
957 else
958 esp = ESP;
959 esp &= ~0xfLL; /* align stack */
960 dpl = cpl;
961 } else {
962 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
963 new_stack = 0; /* avoid warning */
964 esp = 0; /* avoid warning */
965 }
966
967 PUSHQ(esp, env->segs[R_SS].selector);
968 PUSHQ(esp, ESP);
969 PUSHQ(esp, compute_eflags());
970 PUSHQ(esp, env->segs[R_CS].selector);
971 PUSHQ(esp, old_eip);
972 if (has_error_code) {
973 PUSHQ(esp, error_code);
974 }
975
976 if (new_stack) {
977 ss = 0 | dpl;
978 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
979 }
980 ESP = esp;
981
982 selector = (selector & ~3) | dpl;
983 cpu_x86_load_seg_cache(env, R_CS, selector,
984 get_seg_base(e1, e2),
985 get_seg_limit(e1, e2),
986 e2);
987 cpu_x86_set_cpl(env, dpl);
988 env->eip = offset;
989
990 /* interrupt gate clear IF mask */
991 if ((type & 1) == 0) {
992 env->eflags &= ~IF_MASK;
993 }
994 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
995}
996#endif
997
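/* SYSCALL/SYSRET: the STAR MSR holds the SYSCALL CS selector in bits
   47:32 and the SYSRET CS selector base in bits 63:48, with SS always
   at CS + 8. In long mode the target RIP comes from LSTAR (64-bit
   code) or CSTAR (compatibility mode) and SFMASK (env->fmask) selects
   the RFLAGS bits to clear. */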
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}

#if !defined(CONFIG_USER_ONLY)
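/* Mirror an injected event into the VMCB event_inj field so that,
   when running under SVM, the guest hypervisor can observe what was
   delivered. */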
static void handle_event_inj(int intno, int is_int, int error_code,
                             int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
#endif

/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}


/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

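/* Double-fault rules: #DE(0) and #TS/#NP/#SS/#GP(10-13) are the
   contributory exceptions. A second contributory exception during
   delivery of a contributory exception, or a contributory exception
   or page fault during delivery of a page fault, escalates to #DF;
   a fault while delivering #DF is a triple fault. */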
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}


/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

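/* On SMI the CPU state is saved into the SMRAM state save area at
   smbase + 0x8000; the 0x7exx/0x7fxx offsets below follow the 64-bit
   (AMD64) save map when TARGET_X86_64 is set and the legacy 32-bit
   map otherwise. Execution then restarts in big-real mode with
   CS.base = smbase and EIP = 0x8000. */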
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}

void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */


/* division, flags are undefined */

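/* Note: a quotient that does not fit in the destination register
   raises the same #DE (EXCP00_DIVZ) as division by zero. */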
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

/* XXX: exception */
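/* AAM divides AL by the immediate base (10 by default), leaving the
   quotient in AH and the remainder in AL; AAD folds AH back into AL
   as AL + AH * base and clears AH. The XXX above: real hardware
   raises #DE for an AAM base of zero, which is not emulated here. */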
void helper_aam(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = al / base;
    al = al % base;
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_DST = al;
}

void helper_aad(int base)
{
    int al, ah;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;
    al = ((ah * base) + al) & 0xff;
    EAX = (EAX & ~0xffff) | al;
    CC_DST = al;
}

1760{
1761 int icarry;
1762 int al, ah, af;
1763 int eflags;
1764
a7812ae4 1765 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1766 af = eflags & CC_A;
1767 al = EAX & 0xff;
1768 ah = (EAX >> 8) & 0xff;
1769
1770 icarry = (al > 0xf9);
1771 if (((al & 0x0f) > 9 ) || af) {
1772 al = (al + 6) & 0x0f;
1773 ah = (ah + 1 + icarry) & 0xff;
1774 eflags |= CC_C | CC_A;
1775 } else {
1776 eflags &= ~(CC_C | CC_A);
1777 al &= 0x0f;
1778 }
1779 EAX = (EAX & ~0xffff) | al | (ah << 8);
1780 CC_SRC = eflags;
eaa728ee
FB
1781}
1782
1783void helper_aas(void)
1784{
1785 int icarry;
1786 int al, ah, af;
1787 int eflags;
1788
a7812ae4 1789 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1790 af = eflags & CC_A;
1791 al = EAX & 0xff;
1792 ah = (EAX >> 8) & 0xff;
1793
1794 icarry = (al < 6);
1795 if (((al & 0x0f) > 9 ) || af) {
1796 al = (al - 6) & 0x0f;
1797 ah = (ah - 1 - icarry) & 0xff;
1798 eflags |= CC_C | CC_A;
1799 } else {
1800 eflags &= ~(CC_C | CC_A);
1801 al &= 0x0f;
1802 }
1803 EAX = (EAX & ~0xffff) | al | (ah << 8);
1804 CC_SRC = eflags;
eaa728ee
FB
1805}
1806
1807void helper_daa(void)
1808{
1809 int al, af, cf;
1810 int eflags;
1811
a7812ae4 1812 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1813 cf = eflags & CC_C;
1814 af = eflags & CC_A;
1815 al = EAX & 0xff;
1816
1817 eflags = 0;
1818 if (((al & 0x0f) > 9 ) || af) {
1819 al = (al + 6) & 0xff;
1820 eflags |= CC_A;
1821 }
1822 if ((al > 0x9f) || cf) {
1823 al = (al + 0x60) & 0xff;
1824 eflags |= CC_C;
1825 }
1826 EAX = (EAX & ~0xff) | al;
1827 /* well, speed is not an issue here, so we compute the flags by hand */
1828 eflags |= (al == 0) << 6; /* zf */
1829 eflags |= parity_table[al]; /* pf */
1830 eflags |= (al & 0x80); /* sf */
1831 CC_SRC = eflags;
eaa728ee
FB
1832}
1833
1834void helper_das(void)
1835{
1836 int al, al1, af, cf;
1837 int eflags;
1838
a7812ae4 1839 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1840 cf = eflags & CC_C;
1841 af = eflags & CC_A;
1842 al = EAX & 0xff;
1843
1844 eflags = 0;
1845 al1 = al;
1846 if (((al & 0x0f) > 9 ) || af) {
1847 eflags |= CC_A;
1848 if (al < 6 || cf)
1849 eflags |= CC_C;
1850 al = (al - 6) & 0xff;
1851 }
1852 if ((al1 > 0x99) || cf) {
1853 al = (al - 0x60) & 0xff;
1854 eflags |= CC_C;
1855 }
1856 EAX = (EAX & ~0xff) | al;
1857 /* well, speed is not an issue here, so we compute the flags by hand */
1858 eflags |= (al == 0) << 6; /* zf */
1859 eflags |= parity_table[al]; /* pf */
1860 eflags |= (al & 0x80); /* sf */
1861 CC_SRC = eflags;
1862}
1863
1864void helper_into(int next_eip_addend)
1865{
1866 int eflags;
1867 eflags = helper_cc_compute_all(CC_OP);
1868 if (eflags & CC_O) {
1869 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1870 }
1871}
1872
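/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a
   match store ECX:EBX and set ZF, otherwise load the operand into
   EDX:EAX and clear ZF; the store is performed in both cases, as on
   real hardware */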
1873void helper_cmpxchg8b(target_ulong a0)
1874{
1875 uint64_t d;
1876 int eflags;
1877
1878 eflags = helper_cc_compute_all(CC_OP);
1879 d = ldq(a0);
1880 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1881 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1882 eflags |= CC_Z;
1883 } else {
1884 /* always do the store */
1885 stq(a0, d);
1886 EDX = (uint32_t)(d >> 32);
1887 EAX = (uint32_t)d;
1888 eflags &= ~CC_Z;
1889 }
1890 CC_SRC = eflags;
1891}
1892
1893#ifdef TARGET_X86_64
1894void helper_cmpxchg16b(target_ulong a0)
1895{
1896 uint64_t d0, d1;
1897 int eflags;
1898
1899 if ((a0 & 0xf) != 0)
1900 raise_exception(EXCP0D_GPF);
1901 eflags = helper_cc_compute_all(CC_OP);
1902 d0 = ldq(a0);
1903 d1 = ldq(a0 + 8);
1904 if (d0 == EAX && d1 == EDX) {
1905 stq(a0, EBX);
1906 stq(a0 + 8, ECX);
1907 eflags |= CC_Z;
1908 } else {
1909 /* always do the store */
1910 stq(a0, d0);
1911 stq(a0 + 8, d1);
1912 EDX = d1;
1913 EAX = d0;
1914 eflags &= ~CC_Z;
1915 }
1916 CC_SRC = eflags;
1917}
1918#endif
1919
1920void helper_single_step(void)
1921{
1922#ifndef CONFIG_USER_ONLY
1923 check_hw_breakpoints(env, 1);
1924 env->dr[6] |= DR6_BS;
1925#endif
1926 raise_exception(EXCP01_DB);
1927}
1928
1929void helper_cpuid(void)
1930{
1931 uint32_t eax, ebx, ecx, edx;
1932
1933 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1934
1935 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1936 EAX = eax;
1937 EBX = ebx;
1938 ECX = ecx;
1939 EDX = edx;
1940}
1941
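/* ENTER with a non-zero nesting level: copy the enclosing frame
   pointers from the old frame, then push the new frame pointer (t1) */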
1942void helper_enter_level(int level, int data32, target_ulong t1)
1943{
1944 target_ulong ssp;
1945 uint32_t esp_mask, esp, ebp;
1946
1947 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1948 ssp = env->segs[R_SS].base;
1949 ebp = EBP;
1950 esp = ESP;
1951 if (data32) {
1952 /* 32 bit */
1953 esp -= 4;
1954 while (--level) {
1955 esp -= 4;
1956 ebp -= 4;
1957 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1958 }
1959 esp -= 4;
1960 stl(ssp + (esp & esp_mask), t1);
1961 } else {
1962 /* 16 bit */
1963 esp -= 2;
1964 while (--level) {
1965 esp -= 2;
1966 ebp -= 2;
1967 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1968 }
1969 esp -= 2;
1970 stw(ssp + (esp & esp_mask), t1);
1971 }
1972}
1973
1974#ifdef TARGET_X86_64
1975void helper_enter64_level(int level, int data64, target_ulong t1)
1976{
1977 target_ulong esp, ebp;
1978 ebp = EBP;
1979 esp = ESP;
1980
1981 if (data64) {
1982 /* 64 bit */
1983 esp -= 8;
1984 while (--level) {
1985 esp -= 8;
1986 ebp -= 8;
1987 stq(esp, ldq(ebp));
1988 }
1989 esp -= 8;
1990 stq(esp, t1);
1991 } else {
1992 /* 16 bit */
1993 esp -= 2;
1994 while (--level) {
1995 esp -= 2;
1996 ebp -= 2;
1997 stw(esp, lduw(ebp));
1998 }
1999 esp -= 2;
2000 stw(esp, t1);
2001 }
2002}
2003#endif
2004
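/* LLDT: load the LDT register from a GDT descriptor; in long mode
   system descriptors are 16 bytes wide, hence the larger entry_limit */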
2005void helper_lldt(int selector)
2006{
2007 SegmentCache *dt;
2008 uint32_t e1, e2;
2009 int index, entry_limit;
2010 target_ulong ptr;
2011
2012 selector &= 0xffff;
2013 if ((selector & 0xfffc) == 0) {
2014 /* XXX: NULL selector case: invalid LDT */
2015 env->ldt.base = 0;
2016 env->ldt.limit = 0;
2017 } else {
2018 if (selector & 0x4)
2019 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2020 dt = &env->gdt;
2021 index = selector & ~7;
2022#ifdef TARGET_X86_64
2023 if (env->hflags & HF_LMA_MASK)
2024 entry_limit = 15;
2025 else
2026#endif
2027 entry_limit = 7;
2028 if ((index + entry_limit) > dt->limit)
2029 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2030 ptr = dt->base + index;
2031 e1 = ldl_kernel(ptr);
2032 e2 = ldl_kernel(ptr + 4);
2033 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2034 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2035 if (!(e2 & DESC_P_MASK))
2036 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2037#ifdef TARGET_X86_64
2038 if (env->hflags & HF_LMA_MASK) {
2039 uint32_t e3;
2040 e3 = ldl_kernel(ptr + 8);
2041 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2042 env->ldt.base |= (target_ulong)e3 << 32;
2043 } else
2044#endif
2045 {
2046 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2047 }
2048 }
2049 env->ldt.selector = selector;
2050}
2051
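/* LTR: like LLDT, but the selector must reference an available TSS
   descriptor, which is marked busy once loaded */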
2052void helper_ltr(int selector)
2053{
2054 SegmentCache *dt;
2055 uint32_t e1, e2;
2056 int index, type, entry_limit;
2057 target_ulong ptr;
2058
2059 selector &= 0xffff;
2060 if ((selector & 0xfffc) == 0) {
2061 /* NULL selector case: invalid TR */
2062 env->tr.base = 0;
2063 env->tr.limit = 0;
2064 env->tr.flags = 0;
2065 } else {
2066 if (selector & 0x4)
2067 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2068 dt = &env->gdt;
2069 index = selector & ~7;
2070#ifdef TARGET_X86_64
2071 if (env->hflags & HF_LMA_MASK)
2072 entry_limit = 15;
2073 else
2074#endif
2075 entry_limit = 7;
2076 if ((index + entry_limit) > dt->limit)
2077 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2078 ptr = dt->base + index;
2079 e1 = ldl_kernel(ptr);
2080 e2 = ldl_kernel(ptr + 4);
2081 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2082 if ((e2 & DESC_S_MASK) ||
2083 (type != 1 && type != 9))
2084 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2085 if (!(e2 & DESC_P_MASK))
2086 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2087#ifdef TARGET_X86_64
2088 if (env->hflags & HF_LMA_MASK) {
2089 uint32_t e3, e4;
2090 e3 = ldl_kernel(ptr + 8);
2091 e4 = ldl_kernel(ptr + 12);
2092 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2093 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2094 load_seg_cache_raw_dt(&env->tr, e1, e2);
2095 env->tr.base |= (target_ulong)e3 << 32;
2096 } else
2097#endif
2098 {
2099 load_seg_cache_raw_dt(&env->tr, e1, e2);
2100 }
2101 e2 |= DESC_TSS_BUSY_MASK;
2102 stl_kernel(ptr + 4, e2);
2103 }
2104 env->tr.selector = selector;
2105}
2106
2107/* only works in protected mode, not VM86; seg_reg must be != R_CS */
2108void helper_load_seg(int seg_reg, int selector)
2109{
2110 uint32_t e1, e2;
2111 int cpl, dpl, rpl;
2112 SegmentCache *dt;
2113 int index;
2114 target_ulong ptr;
2115
2116 selector &= 0xffff;
2117 cpl = env->hflags & HF_CPL_MASK;
2118 if ((selector & 0xfffc) == 0) {
2119 /* null selector case */
2120 if (seg_reg == R_SS
2121#ifdef TARGET_X86_64
2122 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2123#endif
2124 )
2125 raise_exception_err(EXCP0D_GPF, 0);
2126 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2127 } else {
2128
2129 if (selector & 0x4)
2130 dt = &env->ldt;
2131 else
2132 dt = &env->gdt;
2133 index = selector & ~7;
2134 if ((index + 7) > dt->limit)
2135 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2136 ptr = dt->base + index;
2137 e1 = ldl_kernel(ptr);
2138 e2 = ldl_kernel(ptr + 4);
2139
2140 if (!(e2 & DESC_S_MASK))
2141 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2142 rpl = selector & 3;
2143 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2144 if (seg_reg == R_SS) {
2145 /* must be writable segment */
2146 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2147 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2148 if (rpl != cpl || dpl != cpl)
2149 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2150 } else {
2151 /* must be readable segment */
2152 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2153 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2154
2155 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2156 /* if not conforming code, test rights */
2157 if (dpl < cpl || dpl < rpl)
2158 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2159 }
2160 }
2161
2162 if (!(e2 & DESC_P_MASK)) {
2163 if (seg_reg == R_SS)
2164 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2165 else
2166 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2167 }
2168
2169 /* set the access bit if not already set */
2170 if (!(e2 & DESC_A_MASK)) {
2171 e2 |= DESC_A_MASK;
2172 stl_kernel(ptr + 4, e2);
2173 }
2174
2175 cpu_x86_load_seg_cache(env, seg_reg, selector,
2176 get_seg_base(e1, e2),
2177 get_seg_limit(e1, e2),
2178 e2);
2179#if 0
2180 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2181 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2182#endif
2183 }
2184}
2185
2186/* protected mode jump */
2187void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2188 int next_eip_addend)
2189{
2190 int gate_cs, type;
2191 uint32_t e1, e2, cpl, dpl, rpl, limit;
2192 target_ulong next_eip;
2193
2194 if ((new_cs & 0xfffc) == 0)
2195 raise_exception_err(EXCP0D_GPF, 0);
2196 if (load_segment(&e1, &e2, new_cs) != 0)
2197 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2198 cpl = env->hflags & HF_CPL_MASK;
2199 if (e2 & DESC_S_MASK) {
2200 if (!(e2 & DESC_CS_MASK))
2201 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2202 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2203 if (e2 & DESC_C_MASK) {
2204 /* conforming code segment */
2205 if (dpl > cpl)
2206 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2207 } else {
2208 /* non conforming code segment */
2209 rpl = new_cs & 3;
2210 if (rpl > cpl)
2211 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2212 if (dpl != cpl)
2213 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2214 }
2215 if (!(e2 & DESC_P_MASK))
2216 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2217 limit = get_seg_limit(e1, e2);
2218 if (new_eip > limit &&
2219 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2220 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2221 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2222 get_seg_base(e1, e2), limit, e2);
2223 EIP = new_eip;
2224 } else {
2225 /* jump to call or task gate */
2226 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2227 rpl = new_cs & 3;
2228 cpl = env->hflags & HF_CPL_MASK;
2229 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2230 switch(type) {
2231 case 1: /* 286 TSS */
2232 case 9: /* 386 TSS */
2233 case 5: /* task gate */
2234 if (dpl < cpl || dpl < rpl)
2235 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2236 next_eip = env->eip + next_eip_addend;
2237 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2238 CC_OP = CC_OP_EFLAGS;
2239 break;
2240 case 4: /* 286 call gate */
2241 case 12: /* 386 call gate */
2242 if ((dpl < cpl) || (dpl < rpl))
2243 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2244 if (!(e2 & DESC_P_MASK))
2245 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2246 gate_cs = e1 >> 16;
2247 new_eip = (e1 & 0xffff);
2248 if (type == 12)
2249 new_eip |= (e2 & 0xffff0000);
2250 if (load_segment(&e1, &e2, gate_cs) != 0)
2251 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2252 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2253 /* must be code segment */
2254 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2255 (DESC_S_MASK | DESC_CS_MASK)))
2256 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2257 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2258 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2259 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2260 if (!(e2 & DESC_P_MASK))
2261 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2262 limit = get_seg_limit(e1, e2);
2263 if (new_eip > limit)
2264 raise_exception_err(EXCP0D_GPF, 0);
2265 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2266 get_seg_base(e1, e2), limit, e2);
2267 EIP = new_eip;
2268 break;
2269 default:
2270 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2271 break;
2272 }
2273 }
2274}
2275
2276/* real mode call */
2277void helper_lcall_real(int new_cs, target_ulong new_eip1,
2278 int shift, int next_eip)
2279{
2280 int new_eip;
2281 uint32_t esp, esp_mask;
2282 target_ulong ssp;
2283
2284 new_eip = new_eip1;
2285 esp = ESP;
2286 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2287 ssp = env->segs[R_SS].base;
2288 if (shift) {
2289 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2290 PUSHL(ssp, esp, esp_mask, next_eip);
2291 } else {
2292 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2293 PUSHW(ssp, esp, esp_mask, next_eip);
2294 }
2295
2296 SET_ESP(esp, esp_mask);
2297 env->eip = new_eip;
2298 env->segs[R_CS].selector = new_cs;
2299 env->segs[R_CS].base = (new_cs << 4);
2300}
2301
2302/* protected mode call */
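/* A direct far call performs the usual code segment privilege checks;
   a call through a call gate into a more privileged segment switches
   to an inner stack fetched from the TSS and copies param_count
   parameters from the old stack */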
2303void helper_lcall_protected(int new_cs, target_ulong new_eip,
2304 int shift, int next_eip_addend)
2305{
2306 int new_stack, i;
2307 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2308 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2309 uint32_t val, limit, old_sp_mask;
2310 target_ulong ssp, old_ssp, next_eip;
2311
2312 next_eip = env->eip + next_eip_addend;
2313 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2314 LOG_PCALL_STATE(env);
2315 if ((new_cs & 0xfffc) == 0)
2316 raise_exception_err(EXCP0D_GPF, 0);
2317 if (load_segment(&e1, &e2, new_cs) != 0)
2318 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2319 cpl = env->hflags & HF_CPL_MASK;
2320 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2321 if (e2 & DESC_S_MASK) {
2322 if (!(e2 & DESC_CS_MASK))
2323 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2324 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2325 if (e2 & DESC_C_MASK) {
2326 /* conforming code segment */
2327 if (dpl > cpl)
2328 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2329 } else {
2330 /* non conforming code segment */
2331 rpl = new_cs & 3;
2332 if (rpl > cpl)
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334 if (dpl != cpl)
2335 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2336 }
2337 if (!(e2 & DESC_P_MASK))
2338 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2339
2340#ifdef TARGET_X86_64
2341 /* XXX: check 16/32 bit cases in long mode */
2342 if (shift == 2) {
2343 target_ulong rsp;
2344 /* 64 bit case */
2345 rsp = ESP;
2346 PUSHQ(rsp, env->segs[R_CS].selector);
2347 PUSHQ(rsp, next_eip);
2348 /* from this point, not restartable */
2349 ESP = rsp;
2350 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2351 get_seg_base(e1, e2),
2352 get_seg_limit(e1, e2), e2);
2353 EIP = new_eip;
2354 } else
2355#endif
2356 {
2357 sp = ESP;
2358 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2359 ssp = env->segs[R_SS].base;
2360 if (shift) {
2361 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2362 PUSHL(ssp, sp, sp_mask, next_eip);
2363 } else {
2364 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2365 PUSHW(ssp, sp, sp_mask, next_eip);
2366 }
2367
2368 limit = get_seg_limit(e1, e2);
2369 if (new_eip > limit)
2370 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2371 /* from this point, not restartable */
2372 SET_ESP(sp, sp_mask);
2373 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2374 get_seg_base(e1, e2), limit, e2);
2375 EIP = new_eip;
2376 }
2377 } else {
2378 /* check gate type */
2379 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2380 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2381 rpl = new_cs & 3;
2382 switch(type) {
2383 case 1: /* available 286 TSS */
2384 case 9: /* available 386 TSS */
2385 case 5: /* task gate */
2386 if (dpl < cpl || dpl < rpl)
2387 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2388 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2389 CC_OP = CC_OP_EFLAGS;
2390 return;
2391 case 4: /* 286 call gate */
2392 case 12: /* 386 call gate */
2393 break;
2394 default:
2395 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2396 break;
2397 }
2398 shift = type >> 3;
2399
2400 if (dpl < cpl || dpl < rpl)
2401 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2402 /* check valid bit */
2403 if (!(e2 & DESC_P_MASK))
2404 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2405 selector = e1 >> 16;
2406 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2407 param_count = e2 & 0x1f;
2408 if ((selector & 0xfffc) == 0)
2409 raise_exception_err(EXCP0D_GPF, 0);
2410
2411 if (load_segment(&e1, &e2, selector) != 0)
2412 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2413 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2414 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2415 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2416 if (dpl > cpl)
2417 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2418 if (!(e2 & DESC_P_MASK))
2419 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2420
2421 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2422 /* to inner privilege */
2423 get_ss_esp_from_tss(&ss, &sp, dpl);
2424 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2425 ss, sp, param_count, ESP);
2426 if ((ss & 0xfffc) == 0)
2427 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2428 if ((ss & 3) != dpl)
2429 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2430 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2431 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2432 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2433 if (ss_dpl != dpl)
2434 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2435 if (!(ss_e2 & DESC_S_MASK) ||
2436 (ss_e2 & DESC_CS_MASK) ||
2437 !(ss_e2 & DESC_W_MASK))
2438 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2439 if (!(ss_e2 & DESC_P_MASK))
2440 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441
2442 // push_size = ((param_count * 2) + 8) << shift;
2443
2444 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2445 old_ssp = env->segs[R_SS].base;
2446
2447 sp_mask = get_sp_mask(ss_e2);
2448 ssp = get_seg_base(ss_e1, ss_e2);
2449 if (shift) {
2450 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2451 PUSHL(ssp, sp, sp_mask, ESP);
2452 for(i = param_count - 1; i >= 0; i--) {
2453 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2454 PUSHL(ssp, sp, sp_mask, val);
2455 }
2456 } else {
2457 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2458 PUSHW(ssp, sp, sp_mask, ESP);
2459 for(i = param_count - 1; i >= 0; i--) {
2460 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2461 PUSHW(ssp, sp, sp_mask, val);
2462 }
2463 }
2464 new_stack = 1;
2465 } else {
2466 /* to same privilege */
2467 sp = ESP;
2468 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2469 ssp = env->segs[R_SS].base;
2470 // push_size = (4 << shift);
2471 new_stack = 0;
2472 }
2473
2474 if (shift) {
2475 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2476 PUSHL(ssp, sp, sp_mask, next_eip);
2477 } else {
2478 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2479 PUSHW(ssp, sp, sp_mask, next_eip);
2480 }
2481
2482 /* from this point, not restartable */
2483
2484 if (new_stack) {
2485 ss = (ss & ~3) | dpl;
2486 cpu_x86_load_seg_cache(env, R_SS, ss,
2487 ssp,
2488 get_seg_limit(ss_e1, ss_e2),
2489 ss_e2);
2490 }
2491
2492 selector = (selector & ~3) | dpl;
2493 cpu_x86_load_seg_cache(env, R_CS, selector,
2494 get_seg_base(e1, e2),
2495 get_seg_limit(e1, e2),
2496 e2);
2497 cpu_x86_set_cpl(env, dpl);
2498 SET_ESP(sp, sp_mask);
2499 EIP = offset;
2500 }
2501}
2502
2503/* real and vm86 mode iret */
2504void helper_iret_real(int shift)
2505{
2506 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2507 target_ulong ssp;
2508 int eflags_mask;
2509
2510 sp_mask = 0xffff; /* XXX: use SS segment size? */
2511 sp = ESP;
2512 ssp = env->segs[R_SS].base;
2513 if (shift == 1) {
2514 /* 32 bits */
2515 POPL(ssp, sp, sp_mask, new_eip);
2516 POPL(ssp, sp, sp_mask, new_cs);
2517 new_cs &= 0xffff;
2518 POPL(ssp, sp, sp_mask, new_eflags);
2519 } else {
2520 /* 16 bits */
2521 POPW(ssp, sp, sp_mask, new_eip);
2522 POPW(ssp, sp, sp_mask, new_cs);
2523 POPW(ssp, sp, sp_mask, new_eflags);
2524 }
2525 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2526 env->segs[R_CS].selector = new_cs;
2527 env->segs[R_CS].base = (new_cs << 4);
2528 env->eip = new_eip;
2529 if (env->eflags & VM_MASK)
2530 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2531 else
2532 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2533 if (shift == 0)
2534 eflags_mask &= 0xffff;
2535 load_eflags(new_eflags, eflags_mask);
2536 env->hflags2 &= ~HF2_NMI_MASK;
2537}
2538
2539static inline void validate_seg(int seg_reg, int cpl)
2540{
2541 int dpl;
2542 uint32_t e2;
2543
2544 /* XXX: on x86_64, we do not want to nullify FS and GS because
2545 they may still contain a valid base. I would be interested to
2546 know how a real x86_64 CPU behaves */
2547 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2548 (env->segs[seg_reg].selector & 0xfffc) == 0)
2549 return;
2550
2551 e2 = env->segs[seg_reg].flags;
2552 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2553 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2554 /* data or non conforming code segment */
2555 if (dpl < cpl) {
2556 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2557 }
2558 }
2559}
2560
2561/* protected mode iret */
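/* shared by far RET and IRET: shift selects 16/32/64-bit operands,
   is_iret additionally pops EFLAGS, and addend releases the extra
   stack bytes of an "lret imm16" style return */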
2562static inline void helper_ret_protected(int shift, int is_iret, int addend)
2563{
2564 uint32_t new_cs, new_eflags, new_ss;
2565 uint32_t new_es, new_ds, new_fs, new_gs;
2566 uint32_t e1, e2, ss_e1, ss_e2;
2567 int cpl, dpl, rpl, eflags_mask, iopl;
2568 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2569
2570#ifdef TARGET_X86_64
2571 if (shift == 2)
2572 sp_mask = -1;
2573 else
2574#endif
2575 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2576 sp = ESP;
2577 ssp = env->segs[R_SS].base;
2578 new_eflags = 0; /* avoid warning */
2579#ifdef TARGET_X86_64
2580 if (shift == 2) {
2581 POPQ(sp, new_eip);
2582 POPQ(sp, new_cs);
2583 new_cs &= 0xffff;
2584 if (is_iret) {
2585 POPQ(sp, new_eflags);
2586 }
2587 } else
2588#endif
2589 if (shift == 1) {
2590 /* 32 bits */
2591 POPL(ssp, sp, sp_mask, new_eip);
2592 POPL(ssp, sp, sp_mask, new_cs);
2593 new_cs &= 0xffff;
2594 if (is_iret) {
2595 POPL(ssp, sp, sp_mask, new_eflags);
2596 if (new_eflags & VM_MASK)
2597 goto return_to_vm86;
2598 }
2599 } else {
2600 /* 16 bits */
2601 POPW(ssp, sp, sp_mask, new_eip);
2602 POPW(ssp, sp, sp_mask, new_cs);
2603 if (is_iret)
2604 POPW(ssp, sp, sp_mask, new_eflags);
2605 }
2606 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2607 new_cs, new_eip, shift, addend);
2608 LOG_PCALL_STATE(env);
2609 if ((new_cs & 0xfffc) == 0)
2610 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2611 if (load_segment(&e1, &e2, new_cs) != 0)
2612 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2613 if (!(e2 & DESC_S_MASK) ||
2614 !(e2 & DESC_CS_MASK))
2615 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2616 cpl = env->hflags & HF_CPL_MASK;
2617 rpl = new_cs & 3;
2618 if (rpl < cpl)
2619 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2620 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2621 if (e2 & DESC_C_MASK) {
2622 if (dpl > rpl)
2623 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2624 } else {
2625 if (dpl != rpl)
2626 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2627 }
2628 if (!(e2 & DESC_P_MASK))
2629 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2630
2631 sp += addend;
2632 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2633 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2634 /* return to same privilege level */
2635 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2636 get_seg_base(e1, e2),
2637 get_seg_limit(e1, e2),
2638 e2);
2639 } else {
2640 /* return to different privilege level */
2641#ifdef TARGET_X86_64
2642 if (shift == 2) {
2643 POPQ(sp, new_esp);
2644 POPQ(sp, new_ss);
2645 new_ss &= 0xffff;
2646 } else
2647#endif
2648 if (shift == 1) {
2649 /* 32 bits */
2650 POPL(ssp, sp, sp_mask, new_esp);
2651 POPL(ssp, sp, sp_mask, new_ss);
2652 new_ss &= 0xffff;
2653 } else {
2654 /* 16 bits */
2655 POPW(ssp, sp, sp_mask, new_esp);
2656 POPW(ssp, sp, sp_mask, new_ss);
2657 }
2658 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2659 new_ss, new_esp);
2660 if ((new_ss & 0xfffc) == 0) {
2661#ifdef TARGET_X86_64
2662 /* NULL ss is allowed in long mode if cpl != 3 */
2663 /* XXX: test CS64 ? */
2664 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2665 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2666 0, 0xffffffff,
2667 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2668 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2669 DESC_W_MASK | DESC_A_MASK);
2670 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2671 } else
2672#endif
2673 {
2674 raise_exception_err(EXCP0D_GPF, 0);
2675 }
2676 } else {
2677 if ((new_ss & 3) != rpl)
2678 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2679 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2680 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2681 if (!(ss_e2 & DESC_S_MASK) ||
2682 (ss_e2 & DESC_CS_MASK) ||
2683 !(ss_e2 & DESC_W_MASK))
2684 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2685 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2686 if (dpl != rpl)
2687 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2688 if (!(ss_e2 & DESC_P_MASK))
2689 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2690 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2691 get_seg_base(ss_e1, ss_e2),
2692 get_seg_limit(ss_e1, ss_e2),
2693 ss_e2);
2694 }
2695
2696 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2697 get_seg_base(e1, e2),
2698 get_seg_limit(e1, e2),
2699 e2);
2700 cpu_x86_set_cpl(env, rpl);
2701 sp = new_esp;
2702#ifdef TARGET_X86_64
2703 if (env->hflags & HF_CS64_MASK)
2704 sp_mask = -1;
2705 else
2706#endif
2707 sp_mask = get_sp_mask(ss_e2);
2708
2709 /* validate data segments */
2710 validate_seg(R_ES, rpl);
2711 validate_seg(R_DS, rpl);
2712 validate_seg(R_FS, rpl);
2713 validate_seg(R_GS, rpl);
2714
2715 sp += addend;
2716 }
2717 SET_ESP(sp, sp_mask);
2718 env->eip = new_eip;
2719 if (is_iret) {
2720 /* NOTE: 'cpl' is the _old_ CPL */
2721 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2722 if (cpl == 0)
2723 eflags_mask |= IOPL_MASK;
2724 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2725 if (cpl <= iopl)
2726 eflags_mask |= IF_MASK;
2727 if (shift == 0)
2728 eflags_mask &= 0xffff;
2729 load_eflags(new_eflags, eflags_mask);
2730 }
2731 return;
2732
2733 return_to_vm86:
2734 POPL(ssp, sp, sp_mask, new_esp);
2735 POPL(ssp, sp, sp_mask, new_ss);
2736 POPL(ssp, sp, sp_mask, new_es);
2737 POPL(ssp, sp, sp_mask, new_ds);
2738 POPL(ssp, sp, sp_mask, new_fs);
2739 POPL(ssp, sp, sp_mask, new_gs);
2740
2741 /* modify processor state */
2742 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2743 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2744 load_seg_vm(R_CS, new_cs & 0xffff);
2745 cpu_x86_set_cpl(env, 3);
2746 load_seg_vm(R_SS, new_ss & 0xffff);
2747 load_seg_vm(R_ES, new_es & 0xffff);
2748 load_seg_vm(R_DS, new_ds & 0xffff);
2749 load_seg_vm(R_FS, new_fs & 0xffff);
2750 load_seg_vm(R_GS, new_gs & 0xffff);
2751
2752 env->eip = new_eip & 0xffff;
2753 ESP = new_esp;
2754}
2755
2756void helper_iret_protected(int shift, int next_eip)
2757{
2758 int tss_selector, type;
2759 uint32_t e1, e2;
2760
2761 /* specific case for TSS */
2762 if (env->eflags & NT_MASK) {
2763#ifdef TARGET_X86_64
2764 if (env->hflags & HF_LMA_MASK)
2765 raise_exception_err(EXCP0D_GPF, 0);
2766#endif
2767 tss_selector = lduw_kernel(env->tr.base + 0);
2768 if (tss_selector & 4)
2769 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2770 if (load_segment(&e1, &e2, tss_selector) != 0)
2771 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2772 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2773 /* NOTE: we check both segment and busy TSS */
2774 if (type != 3)
2775 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2776 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2777 } else {
2778 helper_ret_protected(shift, 1, 0);
2779 }
2780 env->hflags2 &= ~HF2_NMI_MASK;
2781}
2782
2783void helper_lret_protected(int shift, int addend)
2784{
2785 helper_ret_protected(shift, 0, addend);
2786}
2787
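/* SYSENTER: enter CPL 0 with flat CS/SS derived from
   MSR_IA32_SYSENTER_CS; the stack pointer and entry point come from
   the SYSENTER_ESP/EIP MSRs */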
2788void helper_sysenter(void)
2789{
2790 if (env->sysenter_cs == 0) {
2791 raise_exception_err(EXCP0D_GPF, 0);
2792 }
2793 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2794 cpu_x86_set_cpl(env, 0);
2795
2796#ifdef TARGET_X86_64
2797 if (env->hflags & HF_LMA_MASK) {
2798 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2799 0, 0xffffffff,
2800 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2801 DESC_S_MASK |
2802 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2803 } else
2804#endif
2805 {
2806 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2807 0, 0xffffffff,
2808 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2809 DESC_S_MASK |
2810 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2811 }
2812 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2813 0, 0xffffffff,
2814 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2815 DESC_S_MASK |
2816 DESC_W_MASK | DESC_A_MASK);
2817 ESP = env->sysenter_esp;
2818 EIP = env->sysenter_eip;
2819}
2820
2821void helper_sysexit(int dflag)
2822{
2823 int cpl;
2824
2825 cpl = env->hflags & HF_CPL_MASK;
2826 if (env->sysenter_cs == 0 || cpl != 0) {
2827 raise_exception_err(EXCP0D_GPF, 0);
2828 }
2829 cpu_x86_set_cpl(env, 3);
2830#ifdef TARGET_X86_64
2831 if (dflag == 2) {
2832 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2833 0, 0xffffffff,
2834 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2835 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2836 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2837 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2838 0, 0xffffffff,
2839 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2840 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2841 DESC_W_MASK | DESC_A_MASK);
2842 } else
2843#endif
2844 {
2845 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2846 0, 0xffffffff,
2847 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2848 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2849 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2850 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2851 0, 0xffffffff,
2852 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2853 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2854 DESC_W_MASK | DESC_A_MASK);
2855 }
2856 ESP = ECX;
2857 EIP = EDX;
2858}
2859
2860#if defined(CONFIG_USER_ONLY)
2861target_ulong helper_read_crN(int reg)
2862{
2863 return 0;
2864}
2865
2866void helper_write_crN(int reg, target_ulong t0)
2867{
2868}
2869
2870void helper_movl_drN_T0(int reg, target_ulong t0)
2871{
2872}
2873#else
2874target_ulong helper_read_crN(int reg)
2875{
2876 target_ulong val;
2877
2878 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2879 switch(reg) {
2880 default:
2881 val = env->cr[reg];
2882 break;
2883 case 8:
2884 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2885 val = cpu_get_apic_tpr(env);
2886 } else {
2887 val = env->v_tpr;
2888 }
2889 break;
2890 }
2891 return val;
2892}
2893
2894void helper_write_crN(int reg, target_ulong t0)
2895{
2896 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2897 switch(reg) {
2898 case 0:
2899 cpu_x86_update_cr0(env, t0);
2900 break;
2901 case 3:
2902 cpu_x86_update_cr3(env, t0);
2903 break;
2904 case 4:
2905 cpu_x86_update_cr4(env, t0);
2906 break;
2907 case 8:
2908 if (!(env->hflags2 & HF2_VINTR_MASK)) {
2909 cpu_set_apic_tpr(env, t0);
2910 }
2911 env->v_tpr = t0 & 0x0f;
2912 break;
2913 default:
2914 env->cr[reg] = t0;
2915 break;
2916 }
2917}
2918
2919void helper_movl_drN_T0(int reg, target_ulong t0)
2920{
2921 int i;
2922
2923 if (reg < 4) {
2924 hw_breakpoint_remove(env, reg);
2925 env->dr[reg] = t0;
2926 hw_breakpoint_insert(env, reg);
2927 } else if (reg == 7) {
2928 for (i = 0; i < 4; i++)
2929 hw_breakpoint_remove(env, i);
2930 env->dr[7] = t0;
2931 for (i = 0; i < 4; i++)
2932 hw_breakpoint_insert(env, i);
2933 } else
2934 env->dr[reg] = t0;
2935}
2936#endif
2937
2938void helper_lmsw(target_ulong t0)
2939{
2940 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2941 if already set to one. */
2942 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2943 helper_write_crN(0, t0);
2944}
2945
2946void helper_clts(void)
2947{
2948 env->cr[0] &= ~CR0_TS_MASK;
2949 env->hflags &= ~HF_TS_MASK;
2950}
2951
2952void helper_invlpg(target_ulong addr)
2953{
2954 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
2955 tlb_flush_page(env, addr);
2956}
2957
2958void helper_rdtsc(void)
2959{
2960 uint64_t val;
2961
2962 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2963 raise_exception(EXCP0D_GPF);
2964 }
2965 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2966
2967 val = cpu_get_tsc(env) + env->tsc_offset;
2968 EAX = (uint32_t)(val);
2969 EDX = (uint32_t)(val >> 32);
2970}
2971
2972void helper_rdtscp(void)
2973{
2974 helper_rdtsc();
2975 ECX = (uint32_t)(env->tsc_aux);
2976}
2977
2978void helper_rdpmc(void)
2979{
2980 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2981 raise_exception(EXCP0D_GPF);
2982 }
2983 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2984
2985 /* currently unimplemented */
2986 raise_exception_err(EXCP06_ILLOP, 0);
2987}
2988
2989#if defined(CONFIG_USER_ONLY)
2990void helper_wrmsr(void)
2991{
2992}
2993
2994void helper_rdmsr(void)
2995{
2996}
2997#else
2998void helper_wrmsr(void)
2999{
3000 uint64_t val;
3001
3002 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3003
3004 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3005
3006 switch((uint32_t)ECX) {
3007 case MSR_IA32_SYSENTER_CS:
3008 env->sysenter_cs = val & 0xffff;
3009 break;
3010 case MSR_IA32_SYSENTER_ESP:
3011 env->sysenter_esp = val;
3012 break;
3013 case MSR_IA32_SYSENTER_EIP:
3014 env->sysenter_eip = val;
3015 break;
3016 case MSR_IA32_APICBASE:
3017 cpu_set_apic_base(env, val);
3018 break;
3019 case MSR_EFER:
3020 {
3021 uint64_t update_mask;
3022 update_mask = 0;
3023 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3024 update_mask |= MSR_EFER_SCE;
3025 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3026 update_mask |= MSR_EFER_LME;
3027 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3028 update_mask |= MSR_EFER_FFXSR;
3029 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3030 update_mask |= MSR_EFER_NXE;
3031 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3032 update_mask |= MSR_EFER_SVME;
3035 cpu_load_efer(env, (env->efer & ~update_mask) |
3036 (val & update_mask));
3037 }
3038 break;
3039 case MSR_STAR:
3040 env->star = val;
3041 break;
3042 case MSR_PAT:
3043 env->pat = val;
3044 break;
3045 case MSR_VM_HSAVE_PA:
3046 env->vm_hsave = val;
3047 break;
3048#ifdef TARGET_X86_64
3049 case MSR_LSTAR:
3050 env->lstar = val;
3051 break;
3052 case MSR_CSTAR:
3053 env->cstar = val;
3054 break;
3055 case MSR_FMASK:
3056 env->fmask = val;
3057 break;
3058 case MSR_FSBASE:
3059 env->segs[R_FS].base = val;
3060 break;
3061 case MSR_GSBASE:
3062 env->segs[R_GS].base = val;
3063 break;
3064 case MSR_KERNELGSBASE:
3065 env->kernelgsbase = val;
3066 break;
3067#endif
3068 case MSR_MTRRphysBase(0):
3069 case MSR_MTRRphysBase(1):
3070 case MSR_MTRRphysBase(2):
3071 case MSR_MTRRphysBase(3):
3072 case MSR_MTRRphysBase(4):
3073 case MSR_MTRRphysBase(5):
3074 case MSR_MTRRphysBase(6):
3075 case MSR_MTRRphysBase(7):
3076 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3077 break;
3078 case MSR_MTRRphysMask(0):
3079 case MSR_MTRRphysMask(1):
3080 case MSR_MTRRphysMask(2):
3081 case MSR_MTRRphysMask(3):
3082 case MSR_MTRRphysMask(4):
3083 case MSR_MTRRphysMask(5):
3084 case MSR_MTRRphysMask(6):
3085 case MSR_MTRRphysMask(7):
3086 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3087 break;
3088 case MSR_MTRRfix64K_00000:
3089 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3090 break;
3091 case MSR_MTRRfix16K_80000:
3092 case MSR_MTRRfix16K_A0000:
3093 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3094 break;
3095 case MSR_MTRRfix4K_C0000:
3096 case MSR_MTRRfix4K_C8000:
3097 case MSR_MTRRfix4K_D0000:
3098 case MSR_MTRRfix4K_D8000:
3099 case MSR_MTRRfix4K_E0000:
3100 case MSR_MTRRfix4K_E8000:
3101 case MSR_MTRRfix4K_F0000:
3102 case MSR_MTRRfix4K_F8000:
3103 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3104 break;
3105 case MSR_MTRRdefType:
3106 env->mtrr_deftype = val;
3107 break;
3108 case MSR_MCG_STATUS:
3109 env->mcg_status = val;
3110 break;
3111 case MSR_MCG_CTL:
3112 if ((env->mcg_cap & MCG_CTL_P)
3113 && (val == 0 || val == ~(uint64_t)0))
3114 env->mcg_ctl = val;
3115 break;
3116 case MSR_TSC_AUX:
3117 env->tsc_aux = val;
3118 break;
3119 default:
3120 if ((uint32_t)ECX >= MSR_MC0_CTL
3121 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3122 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3123 if ((offset & 0x3) != 0
3124 || (val == 0 || val == ~(uint64_t)0))
3125 env->mce_banks[offset] = val;
3126 break;
3127 }
3128 /* XXX: exception ? */
3129 break;
3130 }
3131}
3132
3133void helper_rdmsr(void)
3134{
3135 uint64_t val;
3136
3137 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3138
3139 switch((uint32_t)ECX) {
3140 case MSR_IA32_SYSENTER_CS:
3141 val = env->sysenter_cs;
3142 break;
3143 case MSR_IA32_SYSENTER_ESP:
3144 val = env->sysenter_esp;
3145 break;
3146 case MSR_IA32_SYSENTER_EIP:
3147 val = env->sysenter_eip;
3148 break;
3149 case MSR_IA32_APICBASE:
3150 val = cpu_get_apic_base(env);
3151 break;
3152 case MSR_EFER:
3153 val = env->efer;
3154 break;
3155 case MSR_STAR:
3156 val = env->star;
3157 break;
3158 case MSR_PAT:
3159 val = env->pat;
3160 break;
3161 case MSR_VM_HSAVE_PA:
3162 val = env->vm_hsave;
3163 break;
3164 case MSR_IA32_PERF_STATUS:
3165 /* tsc_increment_by_tick */
3166 val = 1000ULL;
3167 /* CPU multiplier */
3168 val |= (((uint64_t)4ULL) << 40);
3169 break;
3170#ifdef TARGET_X86_64
3171 case MSR_LSTAR:
3172 val = env->lstar;
3173 break;
3174 case MSR_CSTAR:
3175 val = env->cstar;
3176 break;
3177 case MSR_FMASK:
3178 val = env->fmask;
3179 break;
3180 case MSR_FSBASE:
3181 val = env->segs[R_FS].base;
3182 break;
3183 case MSR_GSBASE:
3184 val = env->segs[R_GS].base;
3185 break;
3186 case MSR_KERNELGSBASE:
3187 val = env->kernelgsbase;
3188 break;
3189 case MSR_TSC_AUX:
3190 val = env->tsc_aux;
3191 break;
3192#endif
3193 case MSR_MTRRphysBase(0):
3194 case MSR_MTRRphysBase(1):
3195 case MSR_MTRRphysBase(2):
3196 case MSR_MTRRphysBase(3):
3197 case MSR_MTRRphysBase(4):
3198 case MSR_MTRRphysBase(5):
3199 case MSR_MTRRphysBase(6):
3200 case MSR_MTRRphysBase(7):
3201 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3202 break;
3203 case MSR_MTRRphysMask(0):
3204 case MSR_MTRRphysMask(1):
3205 case MSR_MTRRphysMask(2):
3206 case MSR_MTRRphysMask(3):
3207 case MSR_MTRRphysMask(4):
3208 case MSR_MTRRphysMask(5):
3209 case MSR_MTRRphysMask(6):
3210 case MSR_MTRRphysMask(7):
3211 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3212 break;
3213 case MSR_MTRRfix64K_00000:
3214 val = env->mtrr_fixed[0];
3215 break;
3216 case MSR_MTRRfix16K_80000:
3217 case MSR_MTRRfix16K_A0000:
3218 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3219 break;
3220 case MSR_MTRRfix4K_C0000:
3221 case MSR_MTRRfix4K_C8000:
3222 case MSR_MTRRfix4K_D0000:
3223 case MSR_MTRRfix4K_D8000:
3224 case MSR_MTRRfix4K_E0000:
3225 case MSR_MTRRfix4K_E8000:
3226 case MSR_MTRRfix4K_F0000:
3227 case MSR_MTRRfix4K_F8000:
3228 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3229 break;
3230 case MSR_MTRRdefType:
3231 val = env->mtrr_deftype;
3232 break;
3233 case MSR_MTRRcap:
3234 if (env->cpuid_features & CPUID_MTRR)
3235 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3236 else
3237 /* XXX: exception ? */
3238 val = 0;
3239 break;
3240 case MSR_MCG_CAP:
3241 val = env->mcg_cap;
3242 break;
3243 case MSR_MCG_CTL:
3244 if (env->mcg_cap & MCG_CTL_P)
3245 val = env->mcg_ctl;
3246 else
3247 val = 0;
3248 break;
3249 case MSR_MCG_STATUS:
3250 val = env->mcg_status;
3251 break;
3252 default:
3253 if ((uint32_t)ECX >= MSR_MC0_CTL
3254 && (uint32_t)ECX < MSR_MC0_CTL + 4 * (env->mcg_cap & 0xff)) {
3255 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3256 val = env->mce_banks[offset];
3257 break;
3258 }
3259 /* XXX: exception ? */
3260 val = 0;
3261 break;
3262 }
3263 EAX = (uint32_t)(val);
3264 EDX = (uint32_t)(val >> 32);
3265}
3266#endif
3267
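/* LSL: on success return the segment limit and set ZF; privilege or
   type check failures silently clear ZF instead of faulting */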
3268target_ulong helper_lsl(target_ulong selector1)
3269{
3270 unsigned int limit;
3271 uint32_t e1, e2, eflags, selector;
3272 int rpl, dpl, cpl, type;
3273
3274 selector = selector1 & 0xffff;
3275 eflags = helper_cc_compute_all(CC_OP);
3276 if ((selector & 0xfffc) == 0)
3277 goto fail;
3278 if (load_segment(&e1, &e2, selector) != 0)
3279 goto fail;
3280 rpl = selector & 3;
3281 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3282 cpl = env->hflags & HF_CPL_MASK;
3283 if (e2 & DESC_S_MASK) {
3284 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3285 /* conforming */
3286 } else {
3287 if (dpl < cpl || dpl < rpl)
3288 goto fail;
3289 }
3290 } else {
3291 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3292 switch(type) {
3293 case 1:
3294 case 2:
3295 case 3:
3296 case 9:
3297 case 11:
3298 break;
3299 default:
3300 goto fail;
3301 }
3302 if (dpl < cpl || dpl < rpl) {
3303 fail:
3304 CC_SRC = eflags & ~CC_Z;
3305 return 0;
3306 }
3307 }
3308 limit = get_seg_limit(e1, e2);
3309 CC_SRC = eflags | CC_Z;
3310 return limit;
3311}
3312
3313target_ulong helper_lar(target_ulong selector1)
3314{
3315 uint32_t e1, e2, eflags, selector;
3316 int rpl, dpl, cpl, type;
3317
3318 selector = selector1 & 0xffff;
3319 eflags = helper_cc_compute_all(CC_OP);
3320 if ((selector & 0xfffc) == 0)
3321 goto fail;
3322 if (load_segment(&e1, &e2, selector) != 0)
3323 goto fail;
3324 rpl = selector & 3;
3325 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3326 cpl = env->hflags & HF_CPL_MASK;
3327 if (e2 & DESC_S_MASK) {
3328 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3329 /* conforming */
3330 } else {
3331 if (dpl < cpl || dpl < rpl)
3332 goto fail;
3333 }
3334 } else {
3335 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3336 switch(type) {
3337 case 1:
3338 case 2:
3339 case 3:
3340 case 4:
3341 case 5:
3342 case 9:
3343 case 11:
3344 case 12:
3345 break;
3346 default:
3347 goto fail;
3348 }
3349 if (dpl < cpl || dpl < rpl) {
3350 fail:
3351 CC_SRC = eflags & ~CC_Z;
3352 return 0;
3353 }
3354 }
3355 CC_SRC = eflags | CC_Z;
3356 return e2 & 0x00f0ff00;
3357}
3358
3359void helper_verr(target_ulong selector1)
3360{
3361 uint32_t e1, e2, eflags, selector;
3362 int rpl, dpl, cpl;
3363
3364 selector = selector1 & 0xffff;
3365 eflags = helper_cc_compute_all(CC_OP);
3366 if ((selector & 0xfffc) == 0)
3367 goto fail;
3368 if (load_segment(&e1, &e2, selector) != 0)
3369 goto fail;
3370 if (!(e2 & DESC_S_MASK))
3371 goto fail;
3372 rpl = selector & 3;
3373 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3374 cpl = env->hflags & HF_CPL_MASK;
3375 if (e2 & DESC_CS_MASK) {
3376 if (!(e2 & DESC_R_MASK))
3377 goto fail;
3378 if (!(e2 & DESC_C_MASK)) {
3379 if (dpl < cpl || dpl < rpl)
3380 goto fail;
3381 }
3382 } else {
3383 if (dpl < cpl || dpl < rpl) {
3384 fail:
3385 CC_SRC = eflags & ~CC_Z;
3386 return;
3387 }
3388 }
3389 CC_SRC = eflags | CC_Z;
3390}
3391
3392void helper_verw(target_ulong selector1)
3393{
3394 uint32_t e1, e2, eflags, selector;
3395 int rpl, dpl, cpl;
3396
3397 selector = selector1 & 0xffff;
3398 eflags = helper_cc_compute_all(CC_OP);
3399 if ((selector & 0xfffc) == 0)
3400 goto fail;
3401 if (load_segment(&e1, &e2, selector) != 0)
3402 goto fail;
3403 if (!(e2 & DESC_S_MASK))
3404 goto fail;
3405 rpl = selector & 3;
3406 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3407 cpl = env->hflags & HF_CPL_MASK;
3408 if (e2 & DESC_CS_MASK) {
3409 goto fail;
3410 } else {
3411 if (dpl < cpl || dpl < rpl)
3412 goto fail;
3413 if (!(e2 & DESC_W_MASK)) {
3414 fail:
3415 CC_SRC = eflags & ~CC_Z;
3416 return;
3417 }
3418 }
3419 CC_SRC = eflags | CC_Z;
3420}
3421
3422/* x87 FPU helpers */
3423
3424static void fpu_set_exception(int mask)
3425{
3426 env->fpus |= mask;
3427 if (env->fpus & (~env->fpuc & FPUC_EM))
3428 env->fpus |= FPUS_SE | FPUS_B;
3429}
3430
3431static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3432{
3433 if (b == 0.0)
3434 fpu_set_exception(FPUS_ZE);
3435 return a / b;
3436}
3437
3438static void fpu_raise_exception(void)
3439{
3440 if (env->cr[0] & CR0_NE_MASK) {
3441 raise_exception(EXCP10_COPR);
3442 }
3443#if !defined(CONFIG_USER_ONLY)
3444 else {
3445 cpu_set_ferr(env);
3446 }
3447#endif
3448}
3449
3450void helper_flds_FT0(uint32_t val)
3451{
3452 union {
3453 float32 f;
3454 uint32_t i;
3455 } u;
3456 u.i = val;
3457 FT0 = float32_to_floatx(u.f, &env->fp_status);
3458}
3459
3460void helper_fldl_FT0(uint64_t val)
3461{
3462 union {
3463 float64 f;
3464 uint64_t i;
3465 } u;
3466 u.i = val;
3467 FT0 = float64_to_floatx(u.f, &env->fp_status);
3468}
3469
3470void helper_fildl_FT0(int32_t val)
3471{
3472 FT0 = int32_to_floatx(val, &env->fp_status);
3473}
3474
3475void helper_flds_ST0(uint32_t val)
3476{
3477 int new_fpstt;
3478 union {
3479 float32 f;
3480 uint32_t i;
3481 } u;
3482 new_fpstt = (env->fpstt - 1) & 7;
3483 u.i = val;
3484 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3485 env->fpstt = new_fpstt;
3486 env->fptags[new_fpstt] = 0; /* validate stack entry */
3487}
3488
3489void helper_fldl_ST0(uint64_t val)
3490{
3491 int new_fpstt;
3492 union {
3493 float64 f;
3494 uint64_t i;
3495 } u;
3496 new_fpstt = (env->fpstt - 1) & 7;
3497 u.i = val;
3498 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3499 env->fpstt = new_fpstt;
3500 env->fptags[new_fpstt] = 0; /* validate stack entry */
3501}
3502
3503void helper_fildl_ST0(int32_t val)
3504{
3505 int new_fpstt;
3506 new_fpstt = (env->fpstt - 1) & 7;
3507 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3508 env->fpstt = new_fpstt;
3509 env->fptags[new_fpstt] = 0; /* validate stack entry */
3510}
3511
3512void helper_fildll_ST0(int64_t val)
3513{
3514 int new_fpstt;
3515 new_fpstt = (env->fpstt - 1) & 7;
3516 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3517 env->fpstt = new_fpstt;
3518 env->fptags[new_fpstt] = 0; /* validate stack entry */
3519}
3520
3521uint32_t helper_fsts_ST0(void)
3522{
3523 union {
3524 float32 f;
3525 uint32_t i;
3526 } u;
3527 u.f = floatx_to_float32(ST0, &env->fp_status);
3528 return u.i;
3529}
3530
3531uint64_t helper_fstl_ST0(void)
3532{
3533 union {
3534 float64 f;
3535 uint64_t i;
3536 } u;
3537 u.f = floatx_to_float64(ST0, &env->fp_status);
3538 return u.i;
3539}
3540
3541int32_t helper_fist_ST0(void)
3542{
3543 int32_t val;
3544 val = floatx_to_int32(ST0, &env->fp_status);
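    /* values outside the 16-bit signed range store the indefinite
       integer 0x8000 (-32768) */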
3545 if (val != (int16_t)val)
3546 val = -32768;
3547 return val;
3548}
3549
3550int32_t helper_fistl_ST0(void)
3551{
3552 int32_t val;
3553 val = floatx_to_int32(ST0, &env->fp_status);
3554 return val;
3555}
3556
3557int64_t helper_fistll_ST0(void)
3558{
3559 int64_t val;
3560 val = floatx_to_int64(ST0, &env->fp_status);
3561 return val;
3562}
3563
3564int32_t helper_fistt_ST0(void)
3565{
3566 int32_t val;
3567 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3568 if (val != (int16_t)val)
3569 val = -32768;
3570 return val;
3571}
3572
3573int32_t helper_fisttl_ST0(void)
3574{
3575 int32_t val;
3576 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3577 return val;
3578}
3579
3580int64_t helper_fisttll_ST0(void)
3581{
3582 int64_t val;
3583 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3584 return val;
3585}
3586
3587void helper_fldt_ST0(target_ulong ptr)
3588{
3589 int new_fpstt;
3590 new_fpstt = (env->fpstt - 1) & 7;
3591 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3592 env->fpstt = new_fpstt;
3593 env->fptags[new_fpstt] = 0; /* validate stack entry */
3594}
3595
3596void helper_fstt_ST0(target_ulong ptr)
3597{
3598 helper_fstt(ST0, ptr);
3599}
3600
3601void helper_fpush(void)
3602{
3603 fpush();
3604}
3605
3606void helper_fpop(void)
3607{
3608 fpop();
3609}
3610
3611void helper_fdecstp(void)
3612{
3613 env->fpstt = (env->fpstt - 1) & 7;
3614 env->fpus &= (~0x4700);
3615}
3616
3617void helper_fincstp(void)
3618{
3619 env->fpstt = (env->fpstt + 1) & 7;
3620 env->fpus &= (~0x4700);
3621}
3622
3623/* FPU move */
3624
3625void helper_ffree_STN(int st_index)
3626{
3627 env->fptags[(env->fpstt + st_index) & 7] = 1;
3628}
3629
3630void helper_fmov_ST0_FT0(void)
3631{
3632 ST0 = FT0;
3633}
3634
3635void helper_fmov_FT0_STN(int st_index)
3636{
3637 FT0 = ST(st_index);
3638}
3639
3640void helper_fmov_ST0_STN(int st_index)
3641{
3642 ST0 = ST(st_index);
3643}
3644
3645void helper_fmov_STN_ST0(int st_index)
3646{
3647 ST(st_index) = ST0;
3648}
3649
3650void helper_fxchg_ST0_STN(int st_index)
3651{
3652 CPU86_LDouble tmp;
3653 tmp = ST(st_index);
3654 ST(st_index) = ST0;
3655 ST0 = tmp;
3656}
3657
3658/* FPU operations */
3659
3660static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3661
3662void helper_fcom_ST0_FT0(void)
3663{
3664 int ret;
3665
3666 ret = floatx_compare(ST0, FT0, &env->fp_status);
3667 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3668}
3669
3670void helper_fucom_ST0_FT0(void)
3671{
3672 int ret;
3673
3674 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3675 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3676}
3677
3678static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3679
3680void helper_fcomi_ST0_FT0(void)
3681{
3682 int eflags;
3683 int ret;
3684
3685 ret = floatx_compare(ST0, FT0, &env->fp_status);
3686 eflags = helper_cc_compute_all(CC_OP);
3687 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3688 CC_SRC = eflags;
3689}
3690
3691void helper_fucomi_ST0_FT0(void)
3692{
3693 int eflags;
3694 int ret;
3695
3696 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3697 eflags = helper_cc_compute_all(CC_OP);
3698 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3699 CC_SRC = eflags;
3700}
3701
3702void helper_fadd_ST0_FT0(void)
3703{
3704 ST0 += FT0;
3705}
3706
3707void helper_fmul_ST0_FT0(void)
3708{
3709 ST0 *= FT0;
3710}
3711
3712void helper_fsub_ST0_FT0(void)
3713{
3714 ST0 -= FT0;
3715}
3716
3717void helper_fsubr_ST0_FT0(void)
3718{
3719 ST0 = FT0 - ST0;
3720}
3721
3722void helper_fdiv_ST0_FT0(void)
3723{
3724 ST0 = helper_fdiv(ST0, FT0);
3725}
3726
3727void helper_fdivr_ST0_FT0(void)
3728{
3729 ST0 = helper_fdiv(FT0, ST0);
3730}
3731
3732/* fp operations between STN and ST0 */
3733
3734void helper_fadd_STN_ST0(int st_index)
3735{
3736 ST(st_index) += ST0;
3737}
3738
3739void helper_fmul_STN_ST0(int st_index)
3740{
3741 ST(st_index) *= ST0;
3742}
3743
3744void helper_fsub_STN_ST0(int st_index)
3745{
3746 ST(st_index) -= ST0;
3747}
3748
3749void helper_fsubr_STN_ST0(int st_index)
3750{
3751 CPU86_LDouble *p;
3752 p = &ST(st_index);
3753 *p = ST0 - *p;
3754}
3755
3756void helper_fdiv_STN_ST0(int st_index)
3757{
3758 CPU86_LDouble *p;
3759 p = &ST(st_index);
3760 *p = helper_fdiv(*p, ST0);
3761}
3762
3763void helper_fdivr_STN_ST0(int st_index)
3764{
3765 CPU86_LDouble *p;
3766 p = &ST(st_index);
3767 *p = helper_fdiv(ST0, *p);
3768}
3769
3770/* misc FPU operations */
3771void helper_fchs_ST0(void)
3772{
3773 ST0 = floatx_chs(ST0);
3774}
3775
3776void helper_fabs_ST0(void)
3777{
3778 ST0 = floatx_abs(ST0);
3779}
3780
3781void helper_fld1_ST0(void)
3782{
3783 ST0 = f15rk[1];
3784}
3785
3786void helper_fldl2t_ST0(void)
3787{
3788 ST0 = f15rk[6];
3789}
3790
3791void helper_fldl2e_ST0(void)
3792{
3793 ST0 = f15rk[5];
3794}
3795
3796void helper_fldpi_ST0(void)
3797{
3798 ST0 = f15rk[2];
3799}
3800
3801void helper_fldlg2_ST0(void)
3802{
3803 ST0 = f15rk[3];
3804}
3805
3806void helper_fldln2_ST0(void)
3807{
3808 ST0 = f15rk[4];
3809}
3810
3811void helper_fldz_ST0(void)
3812{
3813 ST0 = f15rk[0];
3814}
3815
3816void helper_fldz_FT0(void)
3817{
3818 FT0 = f15rk[0];
3819}
3820
3821uint32_t helper_fnstsw(void)
3822{
3823 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3824}
3825
3826uint32_t helper_fnstcw(void)
3827{
3828 return env->fpuc;
3829}
3830
3831static void update_fp_status(void)
3832{
3833 int rnd_type;
3834
3835 /* set rounding mode */
3836 switch(env->fpuc & RC_MASK) {
3837 default:
3838 case RC_NEAR:
3839 rnd_type = float_round_nearest_even;
3840 break;
3841 case RC_DOWN:
3842 rnd_type = float_round_down;
3843 break;
3844 case RC_UP:
3845 rnd_type = float_round_up;
3846 break;
3847 case RC_CHOP:
3848 rnd_type = float_round_to_zero;
3849 break;
3850 }
3851 set_float_rounding_mode(rnd_type, &env->fp_status);
3852#ifdef FLOATX80
3853 switch((env->fpuc >> 8) & 3) {
3854 case 0:
3855 rnd_type = 32;
3856 break;
3857 case 2:
3858 rnd_type = 64;
3859 break;
3860 case 3:
3861 default:
3862 rnd_type = 80;
3863 break;
3864 }
3865 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3866#endif
3867}
3868
3869void helper_fldcw(uint32_t val)
3870{
3871 env->fpuc = val;
3872 update_fp_status();
3873}
3874
3875void helper_fclex(void)
3876{
3877 env->fpus &= 0x7f00;
3878}
3879
3880void helper_fwait(void)
3881{
3882 if (env->fpus & FPUS_SE)
3883 fpu_raise_exception();
3884}
3885
3886void helper_fninit(void)
3887{
3888 env->fpus = 0;
3889 env->fpstt = 0;
3890 env->fpuc = 0x37f;
3891 env->fptags[0] = 1;
3892 env->fptags[1] = 1;
3893 env->fptags[2] = 1;
3894 env->fptags[3] = 1;
3895 env->fptags[4] = 1;
3896 env->fptags[5] = 1;
3897 env->fptags[6] = 1;
3898 env->fptags[7] = 1;
3899}
3900
3901/* BCD ops */
3902
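/* FBLD: convert an 18-digit packed BCD operand (two digits per byte,
   sign in the top bit of byte 9) to floating point and push it */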
3903void helper_fbld_ST0(target_ulong ptr)
3904{
3905 CPU86_LDouble tmp;
3906 uint64_t val;
3907 unsigned int v;
3908 int i;
3909
3910 val = 0;
3911 for(i = 8; i >= 0; i--) {
3912 v = ldub(ptr + i);
3913 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3914 }
3915 tmp = val;
3916 if (ldub(ptr + 9) & 0x80)
3917 tmp = -tmp;
3918 fpush();
3919 ST0 = tmp;
3920}
3921
3922void helper_fbst_ST0(target_ulong ptr)
3923{
3924 int v;
3925 target_ulong mem_ref, mem_end;
3926 int64_t val;
3927
3928 val = floatx_to_int64(ST0, &env->fp_status);
3929 mem_ref = ptr;
3930 mem_end = mem_ref + 9;
3931 if (val < 0) {
3932 stb(mem_end, 0x80);
3933 val = -val;
3934 } else {
3935 stb(mem_end, 0x00);
3936 }
3937 while (mem_ref < mem_end) {
3938 if (val == 0)
3939 break;
3940 v = val % 100;
3941 val = val / 100;
3942 v = ((v / 10) << 4) | (v % 10);
3943 stb(mem_ref++, v);
3944 }
3945 while (mem_ref < mem_end) {
3946 stb(mem_ref++, 0);
3947 }
3948}
3949
3950void helper_f2xm1(void)
3951{
3952 ST0 = pow(2.0,ST0) - 1.0;
3953}
3954
3955void helper_fyl2x(void)
3956{
3957 CPU86_LDouble fptemp;
3958
3959 fptemp = ST0;
3960 if (fptemp>0.0){
3961 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3962 ST1 *= fptemp;
3963 fpop();
3964 } else {
3965 env->fpus &= (~0x4700);
3966 env->fpus |= 0x400;
3967 }
3968}
3969
3970void helper_fptan(void)
3971{
3972 CPU86_LDouble fptemp;
3973
3974 fptemp = ST0;
3975 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3976 env->fpus |= 0x400;
3977 } else {
3978 ST0 = tan(fptemp);
3979 fpush();
3980 ST0 = 1.0;
3981 env->fpus &= (~0x400); /* C2 <-- 0 */
3982 /* the above code is for |arg| < 2**52 only */
3983 }
3984}
3985
3986void helper_fpatan(void)
3987{
3988 CPU86_LDouble fptemp, fpsrcop;
3989
3990 fpsrcop = ST1;
3991 fptemp = ST0;
3992 ST1 = atan2(fpsrcop,fptemp);
3993 fpop();
3994}
3995
3996void helper_fxtract(void)
3997{
3998 CPU86_LDoubleU temp;
3999 unsigned int expdif;
4000
4001 temp.d = ST0;
4002 expdif = EXPD(temp) - EXPBIAS;
4003 /*DP exponent bias*/
4004 ST0 = expdif;
4005 fpush();
4006 BIASEXPONENT(temp);
4007 ST0 = temp.d;
4008}
4009
4010void helper_fprem1(void)
4011{
4012 CPU86_LDouble dblq, fpsrcop, fptemp;
4013 CPU86_LDoubleU fpsrcop1, fptemp1;
4014 int expdif;
4015 signed long long int q;
4016
4017 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4018 ST0 = 0.0 / 0.0; /* NaN */
4019 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4020 return;
4021 }
4022
4023 fpsrcop = ST0;
4024 fptemp = ST1;
4025 fpsrcop1.d = fpsrcop;
4026 fptemp1.d = fptemp;
4027 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4028
4029 if (expdif < 0) {
4030 /* optimisation? taken from the AMD docs */
4031 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4032 /* ST0 is unchanged */
4033 return;
4034 }
4035
4036 if (expdif < 53) {
4037 dblq = fpsrcop / fptemp;
4038 /* round dblq towards nearest integer */
4039 dblq = rint(dblq);
4040 ST0 = fpsrcop - fptemp * dblq;
4041
4042 /* convert dblq to q by truncating towards zero */
4043 if (dblq < 0.0)
4044 q = (signed long long int)(-dblq);
4045 else
4046 q = (signed long long int)dblq;
4047
4048 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4049 /* (C0,C3,C1) <-- (q2,q1,q0) */
4050 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4051 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4052 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4053 } else {
4054 env->fpus |= 0x400; /* C2 <-- 1 */
4055 fptemp = pow(2.0, expdif - 50);
4056 fpsrcop = (ST0 / ST1) / fptemp;
4057 /* fpsrcop = integer obtained by chopping */
4058 fpsrcop = (fpsrcop < 0.0) ?
4059 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4060 ST0 -= (ST1 * fpsrcop * fptemp);
4061 }
4062}
4063
4064void helper_fprem(void)
4065{
4066 CPU86_LDouble dblq, fpsrcop, fptemp;
4067 CPU86_LDoubleU fpsrcop1, fptemp1;
4068 int expdif;
4069 signed long long int q;
4070
4071 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
4072 ST0 = 0.0 / 0.0; /* NaN */
4073 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4074 return;
4075 }
4076
4077 fpsrcop = (CPU86_LDouble)ST0;
4078 fptemp = (CPU86_LDouble)ST1;
4079 fpsrcop1.d = fpsrcop;
4080 fptemp1.d = fptemp;
4081 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4082
4083 if (expdif < 0) {
4084 /* optimisation? taken from the AMD docs */
4085 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4086 /* ST0 is unchanged */
4087 return;
4088 }
4089
4090 if ( expdif < 53 ) {
4091 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4092 /* round dblq towards zero */
4093 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4094 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
4095
4096 /* convert dblq to q by truncating towards zero */
4097 if (dblq < 0.0)
4098 q = (signed long long int)(-dblq);
4099 else
4100 q = (signed long long int)dblq;
4101
4102 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4103 /* (C0,C3,C1) <-- (q2,q1,q0) */
4104 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4105 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4106 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4107 } else {
4108 int N = 32 + (expdif % 32); /* as per AMD docs */
4109 env->fpus |= 0x400; /* C2 <-- 1 */
4110 fptemp = pow(2.0, (double)(expdif - N));
4111 fpsrcop = (ST0 / ST1) / fptemp;
4112 /* fpsrcop = integer obtained by chopping */
4113 fpsrcop = (fpsrcop < 0.0) ?
4114 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4115 ST0 -= (ST1 * fpsrcop * fptemp);
4116 }
4117}
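/* Worked example (illustrative sketch, kept under #if 0): the condition-code
   update used by both fprem variants above. The low three bits of the
   chopped quotient land in C0/C3/C1. For ST0 = 17.0, ST1 = 5.0 the quotient
   is 3 (binary 011), so C0 = 0, C3 = 1, C1 = 1 and the remainder is 2.0. */
#if 0
#include <stdio.h>

int main(void)
{
    double st0 = 17.0, st1 = 5.0;
    long long q = (long long)(st0 / st1);   /* chop towards zero: 3 */
    double rem = st0 - st1 * (double)q;     /* 2.0 */

    printf("rem = %g  C0(q2) = %lld  C3(q1) = %lld  C1(q0) = %lld\n",
           rem, (q >> 2) & 1, (q >> 1) & 1, q & 1);
    return 0;
}
#endif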
4118
4119void helper_fyl2xp1(void)
4120{
4121 CPU86_LDouble fptemp;
4122
4123 fptemp = ST0;
4124 if ((fptemp+1.0)>0.0) {
4125 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4126 ST1 *= fptemp;
4127 fpop();
4128 } else {
4129 env->fpus &= (~0x4700);
4130 env->fpus |= 0x400;
4131 }
4132}
4133
4134void helper_fsqrt(void)
4135{
4136 CPU86_LDouble fptemp;
4137
4138 fptemp = ST0;
4139 if (fptemp<0.0) {
4140 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4141 env->fpus |= 0x400;
4142 }
4143 ST0 = sqrt(fptemp);
4144}
4145
4146void helper_fsincos(void)
4147{
4148 CPU86_LDouble fptemp;
4149
4150 fptemp = ST0;
4151 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4152 env->fpus |= 0x400;
4153 } else {
4154 ST0 = sin(fptemp);
4155 fpush();
4156 ST0 = cos(fptemp);
4157 env->fpus &= (~0x400); /* C2 <-- 0 */
4158 /* the above code is for |arg| < 2**63 only */
4159 }
4160}
4161
4162void helper_frndint(void)
4163{
4164 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4165}
4166
4167void helper_fscale(void)
4168{
4169 ST0 = ldexp (ST0, (int)(ST1));
4170}
4171
4172void helper_fsin(void)
4173{
4174 CPU86_LDouble fptemp;
4175
4176 fptemp = ST0;
4177 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4178 env->fpus |= 0x400;
4179 } else {
4180 ST0 = sin(fptemp);
4181 env->fpus &= (~0x400); /* C2 <-- 0 */
4182 /* the above code is for |arg| < 2**53 only */
4183 }
4184}
4185
4186void helper_fcos(void)
4187{
4188 CPU86_LDouble fptemp;
4189
4190 fptemp = ST0;
4191 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4192 env->fpus |= 0x400;
4193 } else {
4194 ST0 = cos(fptemp);
4195 env->fpus &= (~0x400); /* C2 <-- 0 */
4196 /* the above code is for |arg| < 2**63 only */
4197 }
4198}
4199
4200void helper_fxam_ST0(void)
4201{
4202 CPU86_LDoubleU temp;
4203 int expdif;
4204
4205 temp.d = ST0;
4206
4207 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4208 if (SIGND(temp))
4209 env->fpus |= 0x200; /* C1 <-- 1 */
4210
4211 /* XXX: test fptags too */
4212 expdif = EXPD(temp);
4213 if (expdif == MAXEXPD) {
4214#ifdef USE_X86LDOUBLE
4215 if (MANTD(temp) == 0x8000000000000000ULL)
4216#else
4217 if (MANTD(temp) == 0)
4218#endif
4219 env->fpus |= 0x500 /*Infinity*/;
4220 else
4221 env->fpus |= 0x100 /*NaN*/;
4222 } else if (expdif == 0) {
4223 if (MANTD(temp) == 0)
4224 env->fpus |= 0x4000 /*Zero*/;
4225 else
4226 env->fpus |= 0x4400 /*Denormal*/;
4227 } else {
4228 env->fpus |= 0x400;
4229 }
4230}
4231
4232void helper_fstenv(target_ulong ptr, int data32)
4233{
4234 int fpus, fptag, exp, i;
4235 uint64_t mant;
4236 CPU86_LDoubleU tmp;
4237
4238 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4239 fptag = 0;
4240 for (i=7; i>=0; i--) {
4241 fptag <<= 2;
4242 if (env->fptags[i]) {
4243 fptag |= 3;
4244 } else {
4245 tmp.d = env->fpregs[i].d;
4246 exp = EXPD(tmp);
4247 mant = MANTD(tmp);
4248 if (exp == 0 && mant == 0) {
4249 /* zero */
4250 fptag |= 1;
4251 } else if (exp == 0 || exp == MAXEXPD
4252#ifdef USE_X86LDOUBLE
4253 || (mant & (1LL << 63)) == 0
4254#endif
4255 ) {
4256 /* NaNs, infinity, denormal */
4257 fptag |= 2;
4258 }
4259 }
4260 }
4261 if (data32) {
4262 /* 32 bit */
4263 stl(ptr, env->fpuc);
4264 stl(ptr + 4, fpus);
4265 stl(ptr + 8, fptag);
4266 stl(ptr + 12, 0); /* fpip */
4267 stl(ptr + 16, 0); /* fpcs */
4268 stl(ptr + 20, 0); /* fpoo */
4269 stl(ptr + 24, 0); /* fpos */
4270 } else {
4271 /* 16 bit */
4272 stw(ptr, env->fpuc);
4273 stw(ptr + 2, fpus);
4274 stw(ptr + 4, fptag);
4275 stw(ptr + 6, 0);
4276 stw(ptr + 8, 0);
4277 stw(ptr + 10, 0);
4278 stw(ptr + 12, 0);
4279 }
4280}
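/* Worked example (illustrative sketch, kept under #if 0): the 16-bit tag
   word built by the loop above packs one 2-bit tag per register, ST7 in the
   top bits: 0 = valid, 1 = zero, 2 = NaN/infinity/denormal, 3 = empty.
   With only register 0 valid and the rest empty the result is 0xfffc. */
#if 0
#include <stdio.h>

int main(void)
{
    int tags[8] = { 0, 3, 3, 3, 3, 3, 3, 3 };   /* per-register 2-bit tags */
    int fptag = 0, i;

    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        fptag |= tags[i];
    }
    printf("tag word = 0x%04x\n", fptag);       /* 0xfffc */
    return 0;
}
#endif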
4281
4282void helper_fldenv(target_ulong ptr, int data32)
4283{
4284 int i, fpus, fptag;
4285
4286 if (data32) {
4287 env->fpuc = lduw(ptr);
4288 fpus = lduw(ptr + 4);
4289 fptag = lduw(ptr + 8);
4290 }
4291 else {
4292 env->fpuc = lduw(ptr);
4293 fpus = lduw(ptr + 2);
4294 fptag = lduw(ptr + 4);
4295 }
4296 env->fpstt = (fpus >> 11) & 7;
4297 env->fpus = fpus & ~0x3800;
4298 for(i = 0;i < 8; i++) {
4299 env->fptags[i] = ((fptag & 3) == 3);
4300 fptag >>= 2;
4301 }
4302}
4303
4304void helper_fsave(target_ulong ptr, int data32)
4305{
4306 CPU86_LDouble tmp;
4307 int i;
4308
4309 helper_fstenv(ptr, data32);
4310
4311 ptr += (14 << data32);
4312 for(i = 0;i < 8; i++) {
4313 tmp = ST(i);
4314 helper_fstt(tmp, ptr);
4315 ptr += 10;
4316 }
4317
4318 /* fninit */
4319 env->fpus = 0;
4320 env->fpstt = 0;
4321 env->fpuc = 0x37f;
4322 env->fptags[0] = 1;
4323 env->fptags[1] = 1;
4324 env->fptags[2] = 1;
4325 env->fptags[3] = 1;
4326 env->fptags[4] = 1;
4327 env->fptags[5] = 1;
4328 env->fptags[6] = 1;
4329 env->fptags[7] = 1;
4330}
4331
4332void helper_frstor(target_ulong ptr, int data32)
4333{
4334 CPU86_LDouble tmp;
4335 int i;
4336
4337 helper_fldenv(ptr, data32);
4338 ptr += (14 << data32);
4339
4340 for(i = 0;i < 8; i++) {
4341 tmp = helper_fldt(ptr);
4342 ST(i) = tmp;
4343 ptr += 10;
4344 }
4345}
4346
4347void helper_fxsave(target_ulong ptr, int data64)
4348{
4349 int fpus, fptag, i, nb_xmm_regs;
4350 CPU86_LDouble tmp;
4351 target_ulong addr;
4352
4353 /* The operand must be 16 byte aligned */
4354 if (ptr & 0xf) {
4355 raise_exception(EXCP0D_GPF);
4356 }
4357
4358 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4359 fptag = 0;
4360 for(i = 0; i < 8; i++) {
4361 fptag |= (env->fptags[i] << i);
4362 }
4363 stw(ptr, env->fpuc);
4364 stw(ptr + 2, fpus);
4365 stw(ptr + 4, fptag ^ 0xff);
4366#ifdef TARGET_X86_64
4367 if (data64) {
4368 stq(ptr + 0x08, 0); /* rip */
4369 stq(ptr + 0x10, 0); /* rdp */
4370 } else
4371#endif
4372 {
4373 stl(ptr + 0x08, 0); /* eip */
4374 stl(ptr + 0x0c, 0); /* sel */
4375 stl(ptr + 0x10, 0); /* dp */
4376 stl(ptr + 0x14, 0); /* sel */
4377 }
4378
4379 addr = ptr + 0x20;
4380 for(i = 0;i < 8; i++) {
4381 tmp = ST(i);
4382 helper_fstt(tmp, addr);
4383 addr += 16;
4384 }
4385
4386 if (env->cr[4] & CR4_OSFXSR_MASK) {
4387 /* XXX: finish it */
4388 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4389 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4390 if (env->hflags & HF_CS64_MASK)
4391 nb_xmm_regs = 16;
4392 else
4393 nb_xmm_regs = 8;
4394 addr = ptr + 0xa0;
4395 /* Fast FXSAVE leaves out the XMM registers */
4396 if (!(env->efer & MSR_EFER_FFXSR)
4397 || (env->hflags & HF_CPL_MASK)
4398 || !(env->hflags & HF_LMA_MASK)) {
4399 for(i = 0; i < nb_xmm_regs; i++) {
4400 stq(addr, env->xmm_regs[i].XMM_Q(0));
4401 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4402 addr += 16;
4403 }
4404 }
4405 }
4406}
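/* Layout sketch (illustrative only, kept under #if 0) of the FXSAVE area as
   written above for the 32-bit (data64 == 0) case; the remainder of the
   512-byte region is reserved. Each ST(i) slot is 16 bytes even though only
   10 are used, and the XMM block at 0xa0 is skipped by "fast FXSAVE". */
#if 0
#include <stdint.h>

struct fxsave_area_sketch {
    uint16_t fcw;              /* 0x00: env->fpuc */
    uint16_t fsw;              /* 0x02: fpus with TOP folded into bits 13..11 */
    uint16_t ftw;              /* 0x04: abridged tag word (fptag ^ 0xff) */
    uint16_t fop;              /* 0x06 */
    uint32_t eip;              /* 0x08 */
    uint32_t cs_sel;           /* 0x0c */
    uint32_t dp;               /* 0x10 */
    uint32_t ds_sel;           /* 0x14 */
    uint32_t mxcsr;            /* 0x18 */
    uint32_t mxcsr_mask;       /* 0x1c */
    uint8_t  st_regs[8][16];   /* 0x20: ST0..ST7 */
    uint8_t  xmm_regs[16][16]; /* 0xa0: XMM0..XMM15 (only 8 outside long mode) */
};
#endif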
4407
4408void helper_fxrstor(target_ulong ptr, int data64)
4409{
4410 int i, fpus, fptag, nb_xmm_regs;
4411 CPU86_LDouble tmp;
4412 target_ulong addr;
4413
4414 /* The operand must be 16 byte aligned */
4415 if (ptr & 0xf) {
4416 raise_exception(EXCP0D_GPF);
4417 }
4418
4419 env->fpuc = lduw(ptr);
4420 fpus = lduw(ptr + 2);
4421 fptag = lduw(ptr + 4);
4422 env->fpstt = (fpus >> 11) & 7;
4423 env->fpus = fpus & ~0x3800;
4424 fptag ^= 0xff;
4425 for(i = 0;i < 8; i++) {
4426 env->fptags[i] = ((fptag >> i) & 1);
4427 }
4428
4429 addr = ptr + 0x20;
4430 for(i = 0;i < 8; i++) {
4431 tmp = helper_fldt(addr);
4432 ST(i) = tmp;
4433 addr += 16;
4434 }
4435
4436 if (env->cr[4] & CR4_OSFXSR_MASK) {
4437 /* XXX: finish it */
4438 env->mxcsr = ldl(ptr + 0x18);
4439 //ldl(ptr + 0x1c);
4440 if (env->hflags & HF_CS64_MASK)
4441 nb_xmm_regs = 16;
4442 else
4443 nb_xmm_regs = 8;
4444 addr = ptr + 0xa0;
4445 /* Fast FXRSTOR leaves out the XMM registers */
4446 if (!(env->efer & MSR_EFER_FFXSR)
4447 || (env->hflags & HF_CPL_MASK)
4448 || !(env->hflags & HF_LMA_MASK)) {
4449 for(i = 0; i < nb_xmm_regs; i++) {
4450 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4451 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4452 addr += 16;
4453 }
4454 }
4455 }
4456}
4457
4458#ifndef USE_X86LDOUBLE
4459
4460void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4461{
4462 CPU86_LDoubleU temp;
4463 int e;
4464
4465 temp.d = f;
4466 /* mantissa */
4467 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4468 /* exponent + sign */
4469 e = EXPD(temp) - EXPBIAS + 16383;
4470 e |= SIGND(temp) >> 16;
4471 *pexp = e;
4472}
4473
4474CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4475{
4476 CPU86_LDoubleU temp;
4477 int e;
4478 uint64_t ll;
4479
4480 /* XXX: handle overflow ? */
4481 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4482 e |= (upper >> 4) & 0x800; /* sign */
4483 ll = (mant >> 11) & ((1LL << 52) - 1);
4484#ifdef __arm__
4485 temp.l.upper = (e << 20) | (ll >> 32);
4486 temp.l.lower = ll;
4487#else
4488 temp.ll = ll | ((uint64_t)e << 52);
4489#endif
4490 return temp.d;
4491}
4492
4493#else
4494
4495void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4496{
4497 CPU86_LDoubleU temp;
4498
4499 temp.d = f;
4500 *pmant = temp.l.lower;
4501 *pexp = temp.l.upper;
4502}
4503
4504CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4505{
4506 CPU86_LDoubleU temp;
4507
4508 temp.l.upper = upper;
4509 temp.l.lower = mant;
4510 return temp.d;
4511}
4512#endif
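/* Worked example (illustrative sketch, kept under #if 0): the double-to-80-bit
   split performed by cpu_get_fp80() in the !USE_X86LDOUBLE case above. For
   1.5 the 52 fraction bits are 1000...0; shifting them up by 11 and OR-ing in
   the explicit integer bit gives mant = 0xc000000000000000, and the exponent
   is rebiased from 1023 to 16383: 0x3ff - 1023 + 16383 = 0x3fff. */
#if 0
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    double f = 1.5;
    uint64_t bits, frac, mant;
    int e;

    memcpy(&bits, &f, sizeof(bits));            /* read the IEEE754 bits */
    frac = bits & ((1ULL << 52) - 1);
    e = (int)((bits >> 52) & 0x7ff);
    mant = (frac << 11) | (1ULL << 63);         /* make the integer bit explicit */
    printf("mant = 0x%016llx  exp = 0x%04x\n",
           (unsigned long long)mant, e - 1023 + 16383);
    return 0;
}
#endif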
4513
4514#ifdef TARGET_X86_64
4515
4516//#define DEBUG_MULDIV
4517
4518static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4519{
4520 *plow += a;
4521 /* carry test */
4522 if (*plow < a)
4523 (*phigh)++;
4524 *phigh += b;
4525}
4526
4527static void neg128(uint64_t *plow, uint64_t *phigh)
4528{
4529 *plow = ~ *plow;
4530 *phigh = ~ *phigh;
4531 add128(plow, phigh, 1, 0);
4532}
4533
4534/* return TRUE if overflow */
4535static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4536{
4537 uint64_t q, r, a1, a0;
4538 int i, qb, ab;
4539
4540 a0 = *plow;
4541 a1 = *phigh;
4542 if (a1 == 0) {
4543 q = a0 / b;
4544 r = a0 % b;
4545 *plow = q;
4546 *phigh = r;
4547 } else {
4548 if (a1 >= b)
4549 return 1;
4550 /* XXX: use a better algorithm */
4551 for(i = 0; i < 64; i++) {
4552 ab = a1 >> 63;
4553 a1 = (a1 << 1) | (a0 >> 63);
4554 if (ab || a1 >= b) {
4555 a1 -= b;
4556 qb = 1;
4557 } else {
4558 qb = 0;
4559 }
4560 a0 = (a0 << 1) | qb;
4561 }
4562#if defined(DEBUG_MULDIV)
4563 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4564 *phigh, *plow, b, a0, a1);
4565#endif
4566 *plow = a0;
4567 *phigh = a1;
4568 }
4569 return 0;
4570}
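/* Worked example (illustrative sketch, kept under #if 0): the restoring
   shift-and-subtract loop above, dividing the 128-bit value 2^64
   (a1 = 1, a0 = 0) by 3. Each iteration shifts one dividend bit into a1 and
   one quotient bit into a0; the result is q = 0x5555555555555555, r = 1. */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t a0 = 0, a1 = 1, b = 3;     /* dividend = 2^64, divisor = 3 */
    int i, ab, qb;

    for (i = 0; i < 64; i++) {
        ab = a1 >> 63;
        a1 = (a1 << 1) | (a0 >> 63);
        if (ab || a1 >= b) {
            a1 -= b;
            qb = 1;
        } else {
            qb = 0;
        }
        a0 = (a0 << 1) | qb;
    }
    printf("q = 0x%016llx  r = %llu\n",
           (unsigned long long)a0, (unsigned long long)a1);
    return 0;
}
#endif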
4571
4572/* return TRUE if overflow */
4573static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4574{
4575 int sa, sb;
4576 sa = ((int64_t)*phigh < 0);
4577 if (sa)
4578 neg128(plow, phigh);
4579 sb = (b < 0);
4580 if (sb)
4581 b = -b;
4582 if (div64(plow, phigh, b) != 0)
4583 return 1;
4584 if (sa ^ sb) {
4585 if (*plow > (1ULL << 63))
4586 return 1;
4587 *plow = - *plow;
4588 } else {
4589 if (*plow >= (1ULL << 63))
4590 return 1;
4591 }
4592 if (sa)
4593 *phigh = - *phigh;
4594 return 0;
4595}
4596
4597void helper_mulq_EAX_T0(target_ulong t0)
4598{
4599 uint64_t r0, r1;
4600
4601 mulu64(&r0, &r1, EAX, t0);
4602 EAX = r0;
4603 EDX = r1;
4604 CC_DST = r0;
4605 CC_SRC = r1;
4606}
4607
4608void helper_imulq_EAX_T0(target_ulong t0)
4609{
4610 uint64_t r0, r1;
4611
4612 muls64(&r0, &r1, EAX, t0);
4613 EAX = r0;
4614 EDX = r1;
4615 CC_DST = r0;
4616 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4617}
4618
4619target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4620{
4621 uint64_t r0, r1;
4622
4623 muls64(&r0, &r1, t0, t1);
4624 CC_DST = r0;
4625 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4626 return r0;
4627}
4628
4629void helper_divq_EAX(target_ulong t0)
4630{
4631 uint64_t r0, r1;
4632 if (t0 == 0) {
4633 raise_exception(EXCP00_DIVZ);
4634 }
4635 r0 = EAX;
4636 r1 = EDX;
4637 if (div64(&r0, &r1, t0))
4638 raise_exception(EXCP00_DIVZ);
4639 EAX = r0;
4640 EDX = r1;
4641}
4642
4643void helper_idivq_EAX(target_ulong t0)
4644{
4645 uint64_t r0, r1;
4646 if (t0 == 0) {
4647 raise_exception(EXCP00_DIVZ);
4648 }
4649 r0 = EAX;
4650 r1 = EDX;
4651 if (idiv64(&r0, &r1, t0))
4652 raise_exception(EXCP00_DIVZ);
4653 EAX = r0;
4654 EDX = r1;
4655}
4656#endif
4657
4658static void do_hlt(void)
4659{
4660 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4661 env->halted = 1;
4662 env->exception_index = EXCP_HLT;
4663 cpu_loop_exit();
4664}
4665
4666void helper_hlt(int next_eip_addend)
4667{
4668 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4669 EIP += next_eip_addend;
4670
4671 do_hlt();
4672}
4673
4674void helper_monitor(target_ulong ptr)
4675{
4676 if ((uint32_t)ECX != 0)
4677 raise_exception(EXCP0D_GPF);
4678 /* XXX: store address ? */
4679 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4680}
4681
4682void helper_mwait(int next_eip_addend)
4683{
4684 if ((uint32_t)ECX != 0)
4685 raise_exception(EXCP0D_GPF);
4686 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4687 EIP += next_eip_addend;
4688
4689 /* XXX: not complete but not completely erroneous */
4690 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4691 /* more than one CPU: do not sleep because another CPU may
4692 wake this one */
4693 } else {
4694 do_hlt();
4695 }
4696}
4697
4698void helper_debug(void)
4699{
4700 env->exception_index = EXCP_DEBUG;
4701 cpu_loop_exit();
4702}
4703
4704void helper_reset_rf(void)
4705{
4706 env->eflags &= ~RF_MASK;
4707}
4708
4709void helper_raise_interrupt(int intno, int next_eip_addend)
4710{
4711 raise_interrupt(intno, 1, 0, next_eip_addend);
4712}
4713
4714void helper_raise_exception(int exception_index)
4715{
4716 raise_exception(exception_index);
4717}
4718
4719void helper_cli(void)
4720{
4721 env->eflags &= ~IF_MASK;
4722}
4723
4724void helper_sti(void)
4725{
4726 env->eflags |= IF_MASK;
4727}
4728
4729#if 0
4730/* vm86plus instructions */
4731void helper_cli_vm(void)
4732{
4733 env->eflags &= ~VIF_MASK;
4734}
4735
4736void helper_sti_vm(void)
4737{
4738 env->eflags |= VIF_MASK;
4739 if (env->eflags & VIP_MASK) {
4740 raise_exception(EXCP0D_GPF);
4741 }
4742}
4743#endif
4744
4745void helper_set_inhibit_irq(void)
4746{
4747 env->hflags |= HF_INHIBIT_IRQ_MASK;
4748}
4749
4750void helper_reset_inhibit_irq(void)
4751{
4752 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4753}
4754
4755void helper_boundw(target_ulong a0, int v)
4756{
4757 int low, high;
4758 low = ldsw(a0);
4759 high = ldsw(a0 + 2);
4760 v = (int16_t)v;
4761 if (v < low || v > high) {
4762 raise_exception(EXCP05_BOUND);
4763 }
4764}
4765
4766void helper_boundl(target_ulong a0, int v)
4767{
4768 int low, high;
4769 low = ldl(a0);
4770 high = ldl(a0 + 4);
4771 if (v < low || v > high) {
4772 raise_exception(EXCP05_BOUND);
4773 }
4774}
4775
4776static float approx_rsqrt(float a)
4777{
4778 return 1.0 / sqrt(a);
4779}
4780
4781static float approx_rcp(float a)
4782{
4783 return 1.0 / a;
4784}
4785
4786#if !defined(CONFIG_USER_ONLY)
4787
4788#define MMUSUFFIX _mmu
4789
4790#define SHIFT 0
4791#include "softmmu_template.h"
4792
4793#define SHIFT 1
4794#include "softmmu_template.h"
4795
4796#define SHIFT 2
4797#include "softmmu_template.h"
4798
4799#define SHIFT 3
4800#include "softmmu_template.h"
4801
4802#endif
4803
4804#if !defined(CONFIG_USER_ONLY)
4805/* try to fill the TLB and return an exception if error. If retaddr is
4806 NULL, it means that the function was called in C code (i.e. not
4807 from generated code or from helper.c) */
4808/* XXX: fix it to restore all registers */
4809void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4810{
4811 TranslationBlock *tb;
4812 int ret;
4813 unsigned long pc;
4814 CPUX86State *saved_env;
4815
4816 /* XXX: hack to restore env in all cases, even if not called from
4817 generated code */
4818 saved_env = env;
4819 env = cpu_single_env;
4820
4821 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4822 if (ret) {
4823 if (retaddr) {
4824 /* now we have a real cpu fault */
4825 pc = (unsigned long)retaddr;
4826 tb = tb_find_pc(pc);
4827 if (tb) {
4828 /* the PC is inside the translated code. It means that we have
4829 a virtual CPU fault */
4830 cpu_restore_state(tb, env, pc, NULL);
4831 }
4832 }
4833 raise_exception_err(env->exception_index, env->error_code);
4834 }
4835 env = saved_env;
4836}
4837#endif
4838
4839/* Secure Virtual Machine helpers */
4840
4841#if defined(CONFIG_USER_ONLY)
4842
4843void helper_vmrun(int aflag, int next_eip_addend)
4844{
4845}
4846void helper_vmmcall(void)
4847{
4848}
4849void helper_vmload(int aflag)
4850{
4851}
4852void helper_vmsave(int aflag)
4853{
4854}
4855void helper_stgi(void)
4856{
4857}
4858void helper_clgi(void)
4859{
4860}
4861void helper_skinit(void)
4862{
4863}
4864void helper_invlpga(int aflag)
4865{
4866}
4867void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4868{
4869}
4870void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4871{
4872}
4873
4874void helper_svm_check_io(uint32_t port, uint32_t param,
4875 uint32_t next_eip_addend)
4876{
4877}
4878#else
4879
4880static inline void svm_save_seg(target_phys_addr_t addr,
4881 const SegmentCache *sc)
4882{
4883 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4884 sc->selector);
4885 stq_phys(addr + offsetof(struct vmcb_seg, base),
4886 sc->base);
4887 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4888 sc->limit);
4889 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
4890 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4891}
4892
4893static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4894{
4895 unsigned int flags;
4896
4897 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4898 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4899 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4900 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4901 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4902}
4903
4904static inline void svm_load_seg_cache(target_phys_addr_t addr,
4905 CPUState *env, int seg_reg)
4906{
4907 SegmentCache sc1, *sc = &sc1;
4908 svm_load_seg(addr, sc);
4909 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4910 sc->base, sc->limit, sc->flags);
4911}
4912
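/* Worked example (illustrative sketch, kept under #if 0): the attrib packing
   used by svm_save_seg()/svm_load_seg() above. QEMU keeps the descriptor
   access byte in flags[15:8] and the G/B/L/AVL bits in flags[23:20]; the
   VMCB wants them contiguous as attrib[7:0] and attrib[11:8]. For a flat
   32-bit code segment (access byte 0x9b, G = 1, B = 1) flags = 0x00c09b00
   and the packed attrib is 0xc9b; unpacking reverses it exactly. */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned int flags = 0x00c09b00;
    unsigned int attrib = ((flags >> 8) & 0xff) | ((flags >> 12) & 0x0f00);
    unsigned int back = ((attrib & 0xff) << 8) | ((attrib & 0x0f00) << 12);

    printf("attrib = 0x%03x  flags = 0x%08x\n", attrib, back);
    return 0;
}
#endif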
4913void helper_vmrun(int aflag, int next_eip_addend)
4914{
4915 target_ulong addr;
4916 uint32_t event_inj;
4917 uint32_t int_ctl;
4918
4919 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4920
4921 if (aflag == 2)
4922 addr = EAX;
4923 else
4924 addr = (uint32_t)EAX;
4925
4926 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4927
4928 env->vm_vmcb = addr;
4929
4930 /* save the current CPU state in the hsave page */
4931 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4932 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4933
4934 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4935 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4936
4937 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4938 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4939 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4940 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4941 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4942 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4943
4944 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4945 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4946
4947 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4948 &env->segs[R_ES]);
4949 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4950 &env->segs[R_CS]);
4951 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4952 &env->segs[R_SS]);
4953 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4954 &env->segs[R_DS]);
4955
4956 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4957 EIP + next_eip_addend);
4958 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4959 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4960
4961 /* load the interception bitmaps so we do not need to access the
4962 vmcb in svm mode */
4963 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
4964 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4965 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4966 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4967 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4968 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4969
4970 /* enable intercepts */
4971 env->hflags |= HF_SVMI_MASK;
4972
4973 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
4974
4975 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4976 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4977
4978 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4979 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4980
4981 /* clear exit_info_2 so we behave like the real hardware */
4982 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4983
4984 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4985 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4986 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4987 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4988 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4989 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
4990 if (int_ctl & V_INTR_MASKING_MASK) {
4991 env->v_tpr = int_ctl & V_TPR_MASK;
4992 env->hflags2 |= HF2_VINTR_MASK;
4993 if (env->eflags & IF_MASK)
4994 env->hflags2 |= HF2_HIF_MASK;
4995 }
4996
4997 cpu_load_efer(env,
4998 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
4999 env->eflags = 0;
5000 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5001 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5002 CC_OP = CC_OP_EFLAGS;
5003
5004 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5005 env, R_ES);
5006 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5007 env, R_CS);
5008 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5009 env, R_SS);
5010 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5011 env, R_DS);
5012
5013 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5014 env->eip = EIP;
5015 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5016 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5017 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5018 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5019 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5020
5021 /* FIXME: guest state consistency checks */
5022
5023 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5024 case TLB_CONTROL_DO_NOTHING:
5025 break;
5026 case TLB_CONTROL_FLUSH_ALL_ASID:
5027 /* FIXME: this is not 100% correct but should work for now */
5028 tlb_flush(env, 1);
5029 break;
5030 }
5031
5032 env->hflags2 |= HF2_GIF_MASK;
5033
5034 if (int_ctl & V_IRQ_MASK) {
5035 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5036 }
5037
5038 /* maybe we need to inject an event */
5039 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5040 if (event_inj & SVM_EVTINJ_VALID) {
5041 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5042 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5043 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
5044
5045 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5046 /* FIXME: need to implement valid_err */
5047 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5048 case SVM_EVTINJ_TYPE_INTR:
5049 env->exception_index = vector;
5050 env->error_code = event_inj_err;
5051 env->exception_is_int = 0;
5052 env->exception_next_eip = -1;
5053 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
5054 /* XXX: is it always correct ? */
5055 do_interrupt(vector, 0, 0, 0, 1);
5056 break;
5057 case SVM_EVTINJ_TYPE_NMI:
5058 env->exception_index = EXCP02_NMI;
5059 env->error_code = event_inj_err;
5060 env->exception_is_int = 0;
5061 env->exception_next_eip = EIP;
5062 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
5063 cpu_loop_exit();
5064 break;
5065 case SVM_EVTINJ_TYPE_EXEPT:
5066 env->exception_index = vector;
5067 env->error_code = event_inj_err;
5068 env->exception_is_int = 0;
5069 env->exception_next_eip = -1;
5070 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
5071 cpu_loop_exit();
5072 break;
5073 case SVM_EVTINJ_TYPE_SOFT:
5074 env->exception_index = vector;
5075 env->error_code = event_inj_err;
5076 env->exception_is_int = 1;
5077 env->exception_next_eip = EIP;
5078 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
5079 cpu_loop_exit();
5080 break;
5081 }
5082 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
5083 }
5084}
5085
5086void helper_vmmcall(void)
5087{
5088 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5089 raise_exception(EXCP06_ILLOP);
5090}
5091
5092void helper_vmload(int aflag)
5093{
5094 target_ulong addr;
5095 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5096
5097 if (aflag == 2)
5098 addr = EAX;
5099 else
5100 addr = (uint32_t)EAX;
5101
5102 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5103 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5104 env->segs[R_FS].base);
5105
5106 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5107 env, R_FS);
5108 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5109 env, R_GS);
5110 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5111 &env->tr);
5112 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5113 &env->ldt);
5114
5115#ifdef TARGET_X86_64
5116 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5117 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5118 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5119 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5120#endif
5121 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5122 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5123 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5124 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5125}
5126
5127void helper_vmsave(int aflag)
5128{
5129 target_ulong addr;
5130 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5131
5132 if (aflag == 2)
5133 addr = EAX;
5134 else
5135 addr = (uint32_t)EAX;
5136
5137 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5138 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5139 env->segs[R_FS].base);
5140
5141 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5142 &env->segs[R_FS]);
5143 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5144 &env->segs[R_GS]);
5145 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5146 &env->tr);
5147 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5148 &env->ldt);
5149
5150#ifdef TARGET_X86_64
5151 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5152 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5153 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5154 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5155#endif
5156 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5157 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5158 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5159 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5160}
5161
5162void helper_stgi(void)
5163{
5164 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
5165 env->hflags2 |= HF2_GIF_MASK;
5166}
5167
5168void helper_clgi(void)
5169{
5170 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
5171 env->hflags2 &= ~HF2_GIF_MASK;
5172}
5173
5174void helper_skinit(void)
5175{
5176 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5177 /* XXX: not implemented */
5178 raise_exception(EXCP06_ILLOP);
5179}
5180
5181void helper_invlpga(int aflag)
5182{
5183 target_ulong addr;
5184 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5185
5186 if (aflag == 2)
5187 addr = EAX;
5188 else
5189 addr = (uint32_t)EAX;
5190
5191 /* XXX: could use the ASID to see if it is needed to do the
5192 flush */
5193 tlb_flush_page(env, addr);
5194}
5195
5196void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5197{
5198 if (likely(!(env->hflags & HF_SVMI_MASK)))
5199 return;
5200 switch(type) {
5201 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
5202 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5203 helper_vmexit(type, param);
5204 }
5205 break;
5206 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5207 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5208 helper_vmexit(type, param);
5209 }
5210 break;
5211 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5212 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5213 helper_vmexit(type, param);
5214 }
5215 break;
5216 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5217 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5218 helper_vmexit(type, param);
5219 }
5220 break;
5221 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5222 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5223 helper_vmexit(type, param);
5224 }
5225 break;
5226 case SVM_EXIT_MSR:
5227 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5228 /* FIXME: this should be read in at vmrun (faster this way?) */
5229 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5230 uint32_t t0, t1;
5231 switch((uint32_t)ECX) {
5232 case 0 ... 0x1fff:
5233 t0 = (ECX * 2) % 8;
5234 t1 = (ECX * 2) / 8;
5235 break;
5236 case 0xc0000000 ... 0xc0001fff:
5237 t0 = (8192 + ECX - 0xc0000000) * 2;
5238 t1 = (t0 / 8);
5239 t0 %= 8;
5240 break;
5241 case 0xc0010000 ... 0xc0011fff:
5242 t0 = (16384 + ECX - 0xc0010000) * 2;
5243 t1 = (t0 / 8);
5244 t0 %= 8;
5245 break;
5246 default:
5247 helper_vmexit(type, param);
5248 t0 = 0;
5249 t1 = 0;
5250 break;
5251 }
5252 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5253 helper_vmexit(type, param);
5254 }
5255 break;
5256 default:
5257 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5258 helper_vmexit(type, param);
5259 }
5260 break;
5261 }
5262}
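/* Worked example (illustrative sketch, kept under #if 0): indexing the MSR
   permission bitmap in the SVM_EXIT_MSR case above. Each MSR owns two
   adjacent bits (read intercept, then write intercept), and the three
   architectural MSR ranges are stacked 2K apart. For ECX = 0xc0000080
   (EFER) the bit index is (8192 + 0x80) * 2 = 16640, i.e. byte 2080,
   bit 0 for reads and bit 1 for writes. */
#if 0
#include <stdio.h>

int main(void)
{
    unsigned int ecx = 0xc0000080;                  /* EFER */
    unsigned int t0 = (8192 + ecx - 0xc0000000) * 2;
    unsigned int t1 = t0 / 8;                       /* byte offset in bitmap */

    t0 %= 8;                                        /* bit within that byte */
    printf("byte = %u  read bit = %u  write bit = %u\n", t1, t0, t0 + 1);
    return 0;
}
#endif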
5263
5264void helper_svm_check_io(uint32_t port, uint32_t param,
5265 uint32_t next_eip_addend)
5266{
5267 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5268 /* FIXME: this should be read in at vmrun (faster this way?) */
5269 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5270 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5271 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5272 /* next EIP */
5273 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5274 env->eip + next_eip_addend);
5275 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5276 }
5277 }
5278}
5279
5280/* Note: currently only 32 bits of exit_code are used */
5281void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5282{
5283 uint32_t int_ctl;
5284
5285 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5286 exit_code, exit_info_1,
5287 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5288 EIP);
5289
5290 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5291 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5292 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5293 } else {
5294 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5295 }
5296
5297 /* Save the VM state in the vmcb */
5298 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5299 &env->segs[R_ES]);
5300 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5301 &env->segs[R_CS]);
5302 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5303 &env->segs[R_SS]);
5304 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5305 &env->segs[R_DS]);
5306
5307 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5308 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5309
5310 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5311 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5312
5313 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5314 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5315 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5316 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5317 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5318
5319 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5320 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5321 int_ctl |= env->v_tpr & V_TPR_MASK;
5322 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5323 int_ctl |= V_IRQ_MASK;
5324 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5325
5326 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5327 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5328 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5329 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5330 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5331 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5332 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5333
5334 /* Reload the host state from vm_hsave */
5335 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
5336 env->hflags &= ~HF_SVMI_MASK;
5337 env->intercept = 0;
5338 env->intercept_exceptions = 0;
5339 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5340 env->tsc_offset = 0;
5341
5342 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5343 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5344
5345 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5346 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5347
5348 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5349 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5350 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5351 /* we need to set the efer after the crs so the hidden flags get
5352 set properly */
5353 cpu_load_efer(env,
5354 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5355 env->eflags = 0;
5356 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5357 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5358 CC_OP = CC_OP_EFLAGS;
5359
5360 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5361 env, R_ES);
5362 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5363 env, R_CS);
5364 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5365 env, R_SS);
5366 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5367 env, R_DS);
5368
5369 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5370 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5371 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5372
5373 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5374 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5375
5376 /* other setups */
5377 cpu_x86_set_cpl(env, 0);
5378 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5379 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5380
5381 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5382 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5383 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5384 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
5385
5386 env->hflags2 &= ~HF2_GIF_MASK;
5387 /* FIXME: Resets the current ASID register to zero (host ASID). */
5388
5389 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5390
5391 /* Clears the TSC_OFFSET inside the processor. */
5392
5393 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5394 from the page table indicated the host's CR3. If the PDPEs contain
5395 illegal state, the processor causes a shutdown. */
5396
5397 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5398 env->cr[0] |= CR0_PE_MASK;
5399 env->eflags &= ~VM_MASK;
5400
5401 /* Disables all breakpoints in the host DR7 register. */
5402
5403 /* Checks the reloaded host state for consistency. */
5404
5405 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5406 host's code segment or non-canonical (in the case of long mode), a
5407 #GP fault is delivered inside the host. */
5408
5409 /* remove any pending exception */
5410 env->exception_index = -1;
5411 env->error_code = 0;
5412 env->old_exception = -1;
5413
5414 cpu_loop_exit();
5415}
5416
5417#endif
5418
5419/* MMX/SSE */
5420/* XXX: optimize by storing fptt and fptags in the static cpu state */
5421void helper_enter_mmx(void)
5422{
5423 env->fpstt = 0;
5424 *(uint32_t *)(env->fptags) = 0;
5425 *(uint32_t *)(env->fptags + 4) = 0;
5426}
5427
5428void helper_emms(void)
5429{
5430 /* set to empty state */
5431 *(uint32_t *)(env->fptags) = 0x01010101;
5432 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5433}
5434
5435/* XXX: suppress */
5436void helper_movq(void *d, void *s)
5437{
5438 *(uint64_t *)d = *(uint64_t *)s;
5439}
5440
5441#define SHIFT 0
5442#include "ops_sse.h"
5443
5444#define SHIFT 1
5445#include "ops_sse.h"
5446
5447#define SHIFT 0
5448#include "helper_template.h"
5449#undef SHIFT
5450
5451#define SHIFT 1
5452#include "helper_template.h"
5453#undef SHIFT
5454
5455#define SHIFT 2
5456#include "helper_template.h"
5457#undef SHIFT
5458
5459#ifdef TARGET_X86_64
5460
5461#define SHIFT 3
5462#include "helper_template.h"
5463#undef SHIFT
5464
5465#endif
5466
5467/* bit operations */
5468target_ulong helper_bsf(target_ulong t0)
5469{
5470 int count;
5471 target_ulong res;
5472
5473 res = t0;
5474 count = 0;
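    /* note: this helper is only reached with a non-zero operand
       (translate.c branches around the call when the source is 0 and
       handles ZF itself), so the loop below always terminates */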
5475 while ((res & 1) == 0) {
5476 count++;
5477 res >>= 1;
5478 }
5479 return count;
5480}
5481
5482target_ulong helper_lzcnt(target_ulong t0, int wordsize)
5483{
5484 int count;
5485 target_ulong res, mask;
5486
5487 if (wordsize > 0 && t0 == 0) {
5488 return wordsize;
5489 }
5490 res = t0;
5491 count = TARGET_LONG_BITS - 1;
5492 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5493 while ((res & mask) == 0) {
5494 count--;
5495 res <<= 1;
5496 }
5497 if (wordsize > 0) {
5498 return wordsize - 1 - count;
5499 }
5500 return count;
5501}
5502
5503target_ulong helper_bsr(target_ulong t0)
5504{
5505 return helper_lzcnt(t0, 0);
5506}
5507
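/* Worked example (illustrative sketch, kept under #if 0): the two modes of
   helper_lzcnt() above. With wordsize = 16, lzcnt(0x0008) counts 12 leading
   zeros within a 16-bit operand; with wordsize = 0 the same loop degenerates
   into bsr and returns the index of the highest set bit, 3. */
#if 0
#include <stdint.h>
#include <stdio.h>

static int lzcnt_sketch(uint64_t t0, int wordsize)
{
    int count = 63;
    uint64_t mask = 1ULL << 63;

    if (wordsize > 0 && t0 == 0)
        return wordsize;                 /* LZCNT of 0 is the operand width */
    while ((t0 & mask) == 0) {
        count--;
        t0 <<= 1;
    }
    if (wordsize > 0)
        return wordsize - 1 - count;     /* leading zeros within wordsize */
    return count;                        /* wordsize == 0: plain BSR */
}

int main(void)
{
    printf("lzcnt16(0x0008) = %d\n", lzcnt_sketch(0x0008, 16));  /* 12 */
    printf("bsr(0x0008)     = %d\n", lzcnt_sketch(0x0008, 0));   /* 3 */
    return 0;
}
#endif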
5508static int compute_all_eflags(void)
5509{
5510 return CC_SRC;
5511}
5512
5513static int compute_c_eflags(void)
5514{
5515 return CC_SRC & CC_C;
5516}
5517
5518uint32_t helper_cc_compute_all(int op)
5519{
5520 switch (op) {
5521 default: /* should never happen */ return 0;
5522
5523 case CC_OP_EFLAGS: return compute_all_eflags();
5524
5525 case CC_OP_MULB: return compute_all_mulb();
5526 case CC_OP_MULW: return compute_all_mulw();
5527 case CC_OP_MULL: return compute_all_mull();
5528
5529 case CC_OP_ADDB: return compute_all_addb();
5530 case CC_OP_ADDW: return compute_all_addw();
5531 case CC_OP_ADDL: return compute_all_addl();
5532
5533 case CC_OP_ADCB: return compute_all_adcb();
5534 case CC_OP_ADCW: return compute_all_adcw();
5535 case CC_OP_ADCL: return compute_all_adcl();
5536
5537 case CC_OP_SUBB: return compute_all_subb();
5538 case CC_OP_SUBW: return compute_all_subw();
5539 case CC_OP_SUBL: return compute_all_subl();
5540
5541 case CC_OP_SBBB: return compute_all_sbbb();
5542 case CC_OP_SBBW: return compute_all_sbbw();
5543 case CC_OP_SBBL: return compute_all_sbbl();
5544
5545 case CC_OP_LOGICB: return compute_all_logicb();
5546 case CC_OP_LOGICW: return compute_all_logicw();
5547 case CC_OP_LOGICL: return compute_all_logicl();
5548
5549 case CC_OP_INCB: return compute_all_incb();
5550 case CC_OP_INCW: return compute_all_incw();
5551 case CC_OP_INCL: return compute_all_incl();
5552
5553 case CC_OP_DECB: return compute_all_decb();
5554 case CC_OP_DECW: return compute_all_decw();
5555 case CC_OP_DECL: return compute_all_decl();
5556
5557 case CC_OP_SHLB: return compute_all_shlb();
5558 case CC_OP_SHLW: return compute_all_shlw();
5559 case CC_OP_SHLL: return compute_all_shll();
5560
5561 case CC_OP_SARB: return compute_all_sarb();
5562 case CC_OP_SARW: return compute_all_sarw();
5563 case CC_OP_SARL: return compute_all_sarl();
5564
5565#ifdef TARGET_X86_64
5566 case CC_OP_MULQ: return compute_all_mulq();
5567
5568 case CC_OP_ADDQ: return compute_all_addq();
5569
5570 case CC_OP_ADCQ: return compute_all_adcq();
5571
5572 case CC_OP_SUBQ: return compute_all_subq();
5573
5574 case CC_OP_SBBQ: return compute_all_sbbq();
5575
5576 case CC_OP_LOGICQ: return compute_all_logicq();
5577
5578 case CC_OP_INCQ: return compute_all_incq();
5579
5580 case CC_OP_DECQ: return compute_all_decq();
5581
5582 case CC_OP_SHLQ: return compute_all_shlq();
5583
5584 case CC_OP_SARQ: return compute_all_sarq();
5585#endif
5586 }
5587}
5588
5589uint32_t helper_cc_compute_c(int op)
5590{
5591 switch (op) {
5592 default: /* should never happen */ return 0;
5593
5594 case CC_OP_EFLAGS: return compute_c_eflags();
5595
5596 case CC_OP_MULB: return compute_c_mull();
5597 case CC_OP_MULW: return compute_c_mull();
5598 case CC_OP_MULL: return compute_c_mull();
5599
5600 case CC_OP_ADDB: return compute_c_addb();
5601 case CC_OP_ADDW: return compute_c_addw();
5602 case CC_OP_ADDL: return compute_c_addl();
5603
5604 case CC_OP_ADCB: return compute_c_adcb();
5605 case CC_OP_ADCW: return compute_c_adcw();
5606 case CC_OP_ADCL: return compute_c_adcl();
5607
5608 case CC_OP_SUBB: return compute_c_subb();
5609 case CC_OP_SUBW: return compute_c_subw();
5610 case CC_OP_SUBL: return compute_c_subl();
5611
5612 case CC_OP_SBBB: return compute_c_sbbb();
5613 case CC_OP_SBBW: return compute_c_sbbw();
5614 case CC_OP_SBBL: return compute_c_sbbl();
5615
5616 case CC_OP_LOGICB: return compute_c_logicb();
5617 case CC_OP_LOGICW: return compute_c_logicw();
5618 case CC_OP_LOGICL: return compute_c_logicl();
5619
5620 case CC_OP_INCB: return compute_c_incl();
5621 case CC_OP_INCW: return compute_c_incl();
5622 case CC_OP_INCL: return compute_c_incl();
5623
5624 case CC_OP_DECB: return compute_c_incl();
5625 case CC_OP_DECW: return compute_c_incl();
5626 case CC_OP_DECL: return compute_c_incl();
5627
5628 case CC_OP_SHLB: return compute_c_shlb();
5629 case CC_OP_SHLW: return compute_c_shlw();
5630 case CC_OP_SHLL: return compute_c_shll();
5631
5632 case CC_OP_SARB: return compute_c_sarl();
5633 case CC_OP_SARW: return compute_c_sarl();
5634 case CC_OP_SARL: return compute_c_sarl();
5635
5636#ifdef TARGET_X86_64
5637 case CC_OP_MULQ: return compute_c_mull();
5638
5639 case CC_OP_ADDQ: return compute_c_addq();
5640
5641 case CC_OP_ADCQ: return compute_c_adcq();
5642
5643 case CC_OP_SUBQ: return compute_c_subq();
5644
5645 case CC_OP_SBBQ: return compute_c_sbbq();
5646
5647 case CC_OP_LOGICQ: return compute_c_logicq();
5648
5649 case CC_OP_INCQ: return compute_c_incl();
5650
5651 case CC_OP_DECQ: return compute_c_incl();
5652
5653 case CC_OP_SHLQ: return compute_c_shlq();
5654
5655 case CC_OP_SARQ: return compute_c_sarl();
5656#endif
5657 }
5658}
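/* Worked example (illustrative sketch, kept under #if 0): the lazy
   condition-code scheme the two dispatchers above serve. The translator
   records the last flag-setting operation and its operands in
   CC_OP/CC_DST/CC_SRC instead of computing EFLAGS eagerly; the flags are
   reconstructed only when read. A standalone analogue with a single op,
   mirroring compute_c_subb (src1 = CC_DST + CC_SRC; CF = src1 < CC_SRC): */
#if 0
#include <stdint.h>
#include <stdio.h>

enum { OP_SUBB };                       /* one op kind for the sketch */

static int cc_op;
static uint32_t cc_dst, cc_src;

static void do_subb(uint8_t a, uint8_t b)
{
    cc_dst = (uint8_t)(a - b);          /* record the result... */
    cc_src = b;                         /* ...and the subtrahend */
    cc_op = OP_SUBB;                    /* no flags computed here */
}

static int compute_cf(void)             /* materialize CF on demand */
{
    uint8_t src1 = (uint8_t)(cc_dst + cc_src);  /* recover the minuend */
    return src1 < cc_src;
}

int main(void)
{
    do_subb(3, 5);                      /* 3 - 5 borrows */
    printf("CF = %d  ZF = %d\n", compute_cf(), cc_dst == 0);  /* 1, 0 */
    return 0;
}
#endif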