]> git.proxmox.com Git - qemu.git/blame - target-i386/op_helper.c
target-i386: fix helper_fdiv() wrt softfloat
[qemu.git] / target-i386 / op_helper.c
CommitLineData
eaa728ee
FB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
eaa728ee 18 */
83dae095 19
eaa728ee 20#include "exec.h"
d9957a8b 21#include "exec-all.h"
eaa728ee 22#include "host-utils.h"
35bed8ee 23#include "ioport.h"
eaa728ee
FB
24
25//#define DEBUG_PCALL
26
d12d51d5
AL
27
28#ifdef DEBUG_PCALL
93fcfe39
AL
29# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
30# define LOG_PCALL_STATE(env) \
31 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
d12d51d5
AL
32#else
33# define LOG_PCALL(...) do { } while (0)
34# define LOG_PCALL_STATE(env) do { } while (0)
35#endif
36
37
eaa728ee
FB
38#if 0
39#define raise_exception_err(a, b)\
40do {\
93fcfe39 41 qemu_log("raise_exception line=%d\n", __LINE__);\
eaa728ee
FB
42 (raise_exception_err)(a, b);\
43} while (0)
44#endif
45
/* PF lookup table: parity_table[b] is CC_P when byte b has an even
   number of set bits (the x86 parity-flag convention), else 0. */
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
80
/* modulo 17 table */
/* Reduces a 5-bit rotate count for 16-bit RCL/RCR: the rotation is
   through 17 bits (16 data bits + CF), so counts wrap modulo 17. */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
88
/* modulo 9 table */
/* Reduces a 5-bit rotate count for 8-bit RCL/RCR: the rotation is
   through 9 bits (8 data bits + CF), so counts wrap modulo 9. */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
96
/* FPU constants indexed by the x87 constant-load opcodes
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T). */
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
107
/* broken thread support */

/* Single global lock emulating the x86 LOCK prefix: serializes all
   locked memory operations across emulated CPUs. */
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

/* Acquire the global LOCK-prefix lock (emitted before a locked insn). */
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

/* Release the global LOCK-prefix lock (emitted after a locked insn). */
void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
121
/* Store t0 into EFLAGS; only the bits selected by update_mask are
   allowed to change (the rest keep their current value). */
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
126
/* Materialize the full EFLAGS value: the arithmetic flags are
   recomputed lazily from CC_OP, DF is folded back in, and VM/RF are
   masked out of the stored eflags (they are tracked separately). */
target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
135
/* return non zero if error */
/* Fetch the two 32-bit words of the descriptor referenced by
   'selector' from the GDT or LDT (bit 2 of the selector chooses).
   Returns -1 if the selector index is outside the table limit. */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)          /* TI bit: 1 = LDT, 0 = GDT */
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;       /* descriptor index * 8 */
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
156
157static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
158{
159 unsigned int limit;
160 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
161 if (e2 & DESC_G_MASK)
162 limit = (limit << 12) | 0xfff;
163 return limit;
164}
165
/* Reassemble the 32-bit segment base scattered across descriptor
   words: e1[31:16] = base[15:0], e2[7:0] = base[23:16],
   e2[31:24] = base[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;
    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
170
/* Fill a segment cache directly from raw descriptor words, with no
   permission or presence checks (caller has already validated). */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;   /* keep the raw high word as the cached flags */
}
177
/* init the segment cache in vm86 mode. */
/* In vm86 the base is simply selector * 16, the limit is 64K and
   there are no descriptor flags. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
185
/* Read the ring-'dpl' stack pointer (SS:ESP) out of the current TSS,
   used when an interrupt/call raises the privilege level.  Handles
   both 16-bit and 32-bit TSS layouts. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* type bit 3 distinguishes a 32-bit (shift=1) from a 16-bit TSS */
    shift = type >> 3;
    /* each privilege level has a 2- or 4-byte SP followed by SS */
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
220
/* XXX: merge with load_seg() */
/* Load a segment register as part of a task switch, raising #TS
   (not #GP as the normal load would) on any descriptor-check
   failure.  A null selector is allowed except for CS and SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))          /* must be code/data, not system */
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
270
/* Reason for the task switch: it decides how the busy bit and the
   NT flag / back-link are updated below. */
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS referenced by
   'tss_selector' (descriptor words e1/e2): save the outgoing CPU
   state into the current TSS, then load the full register, segment
   and LDT state from the new one.  'source' is one of the
   SWITCH_TSS_* values; 'next_eip' is the return EIP stored for
   JMP/CALL sources.  Statement order is significant: memory is
   touched before any architectural state changes so a fault leaves
   the old task intact. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)              /* TSS must live in the GDT */
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)              /* must be a system descriptor */
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* minimum TSS size: 104 bytes for a 32-bit TSS, 44 for 16-bit */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;    /* IRET leaves the nested-task chain */

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* store the back link and mark the new task as nested */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;   /* task-switched bit, for FPU lazy save */
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
523
/* check if Port I/O is allowed in TSS */
/* Consult the I/O permission bitmap of the current (32-bit) TSS for
   'size' consecutive ports starting at 'addr'; raises #GP(0) if any
   corresponding bit is set or the bitmap is out of reach.  Note the
   'goto fail' jumps into the if body at the bottom. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);   /* I/O map base */
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
548
/* Check I/O permission for a byte access to port t0. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
553
/* Check I/O permission for a word access to port t0. */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
558
/* Check I/O permission for a dword access to port t0. */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
563
/* OUT imm8/DX, AL: write the low byte of data to the I/O port. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}
568
/* IN AL, imm8/DX: read one byte from the I/O port. */
target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}
573
/* OUT imm8/DX, AX: write the low 16 bits of data to the I/O port. */
void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}
578
/* IN AX, imm8/DX: read one 16-bit word from the I/O port. */
target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}
583
/* OUT imm8/DX, EAX: write 32 bits of data to the I/O port. */
void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}
588
/* IN EAX, imm8/DX: read one 32-bit dword from the I/O port. */
target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
593
594static inline unsigned int get_sp_mask(unsigned int e2)
595{
596 if (e2 & DESC_B_MASK)
597 return 0xffffffff;
598 else
599 return 0xffff;
600}
601
2ed51f5b
AL
602static int exeption_has_error_code(int intno)
603{
604 switch(intno) {
605 case 8:
606 case 10:
607 case 11:
608 case 12:
609 case 13:
610 case 14:
611 case 17:
612 return 1;
613 }
614 return 0;
615}
616
eaa728ee
FB
/* Update the stack pointer respecting the stack-size attribute:
   only the bits selected by sp_mask change.  On x86_64 a full-width
   mask means RSP is replaced outright (32-bit writes zero-extend). */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
634
eaa728ee
FB
/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop primitives used by the interrupt/call code below.
   'ssp' is the stack segment base, 'sp' a local copy of the stack
   pointer (updated in place), 'sp_mask' the 16/32-bit stack mask. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
659
/* protected mode interrupt */
/* Deliver interrupt/exception 'intno' in protected mode: walk the
   IDT gate (task, 286 or 386 interrupt/trap gate), perform the
   privilege checks, optionally switch to the inner-ring stack from
   the TSS, push the return frame (and error code), and transfer to
   the handler.  'is_int' marks a software INT (uses next_eip),
   'is_hw' a hardware interrupt (never pushes an error code). */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;   /* INT n: return address is after the insn */
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;   /* 1 = 32-bit gate pushes, 0 = 16-bit */

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                /* leaving vm86: also push the data segment selectors */
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* handler runs outside vm86: null the data segments */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
853
854#ifdef TARGET_X86_64
855
/* 64-bit stack push/pop: flat address space, no segment base or
   stack-pointer masking needed. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
867
/* Read an RSP entry from the 64-bit TSS: level 0-2 selects RSP0-2,
   and (via ist+3 at the call sites) 3-9 selects IST1-7. */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;   /* RSP0 is at offset 4, entries are 8 bytes */
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
884
/* 64 bit interrupt */
/* Deliver interrupt/exception 'intno' in long mode: read the 16-byte
   IDT gate, check privileges, pick the target stack (IST entry or
   ring RSP from the TSS), push the 64-bit frame and jump to the
   handler.  Only 386-style interrupt/trap gates are legal here. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)   /* long-mode gates are 16 bytes */
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);          /* upper 32 bits of the offset */
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))   /* must be 64-bit code */
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;        /* inner stacks use a null SS in long mode */
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* long mode always pushes SS:RSP, even without a stack switch */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
1001#endif
1002
d9957a8b 1003#ifdef TARGET_X86_64
eaa728ee
FB
#if defined(CONFIG_USER_ONLY)
/* SYSCALL, user-mode emulation: just raise EXCP_SYSCALL so the
   per-target cpu loop can service the guest system call. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
/* SYSCALL, system emulation: fast ring-0 entry.  Loads CS/SS from
   the STAR MSR, saves the return RIP in (R)CX (and RFLAGS in R11 in
   long mode), then jumps to LSTAR/CSTAR or the legacy STAR EIP. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {   /* SYSCALL must be enabled in EFER */
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();   /* R11 <- RFLAGS */

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;   /* SFMASK MSR clears selected flags */
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;    /* compat-mode entry point */
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
d9957a8b 1064#endif
eaa728ee 1065
#ifdef TARGET_X86_64
/* SYSRET: return from SYSCALL to CPL 3.  CS/SS come from
   MSR_STAR[63:48]; dflag == 2 selects a 64-bit return (RIP from RCX),
   otherwise a 32-bit return.  #UD if SCE is clear, #GP outside
   protected mode or when not at CPL 0. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
1121
1122/* real mode interrupt */
1123static void do_interrupt_real(int intno, int is_int, int error_code,
1124 unsigned int next_eip)
1125{
1126 SegmentCache *dt;
1127 target_ulong ptr, ssp;
1128 int selector;
1129 uint32_t offset, esp;
1130 uint32_t old_cs, old_eip;
eaa728ee 1131
eaa728ee
FB
1132 /* real mode (simpler !) */
1133 dt = &env->idt;
1134 if (intno * 4 + 3 > dt->limit)
1135 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1136 ptr = dt->base + intno * 4;
1137 offset = lduw_kernel(ptr);
1138 selector = lduw_kernel(ptr + 2);
1139 esp = ESP;
1140 ssp = env->segs[R_SS].base;
1141 if (is_int)
1142 old_eip = next_eip;
1143 else
1144 old_eip = env->eip;
1145 old_cs = env->segs[R_CS].selector;
1146 /* XXX: use SS segment size ? */
1147 PUSHW(ssp, esp, 0xffff, compute_eflags());
1148 PUSHW(ssp, esp, 0xffff, old_cs);
1149 PUSHW(ssp, esp, 0xffff, old_eip);
1150
1151 /* update processor state */
1152 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1153 env->eip = offset;
1154 env->segs[R_CS].selector = selector;
1155 env->segs[R_CS].base = (selector << 4);
1156 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1157}
1158
1159/* fake user mode interrupt */
1160void do_interrupt_user(int intno, int is_int, int error_code,
1161 target_ulong next_eip)
1162{
1163 SegmentCache *dt;
1164 target_ulong ptr;
1165 int dpl, cpl, shift;
1166 uint32_t e2;
1167
1168 dt = &env->idt;
1169 if (env->hflags & HF_LMA_MASK) {
1170 shift = 4;
1171 } else {
1172 shift = 3;
1173 }
1174 ptr = dt->base + (intno << shift);
1175 e2 = ldl_kernel(ptr + 4);
1176
1177 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1178 cpl = env->hflags & HF_CPL_MASK;
1235fc06 1179 /* check privilege if software int */
eaa728ee
FB
1180 if (is_int && dpl < cpl)
1181 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1182
1183 /* Since we emulate only user space, we cannot do more than
1184 exiting the emulation with the suitable exception and error
1185 code */
1186 if (is_int)
1187 EIP = next_eip;
1188}
1189
00ea18d1 1190#if !defined(CONFIG_USER_ONLY)
2ed51f5b
AL
1191static void handle_even_inj(int intno, int is_int, int error_code,
1192 int is_hw, int rm)
1193{
1194 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1195 if (!(event_inj & SVM_EVTINJ_VALID)) {
1196 int type;
1197 if (is_int)
1198 type = SVM_EVTINJ_TYPE_SOFT;
1199 else
1200 type = SVM_EVTINJ_TYPE_EXEPT;
1201 event_inj = intno | type | SVM_EVTINJ_VALID;
1202 if (!rm && exeption_has_error_code(intno)) {
1203 event_inj |= SVM_EVTINJ_VALID_ERR;
1204 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1205 }
1206 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1207 }
1208}
00ea18d1 1209#endif
2ed51f5b 1210
eaa728ee
FB
1211/*
1212 * Begin execution of an interruption. is_int is TRUE if coming from
1213 * the int instruction. next_eip is the EIP value AFTER the interrupt
1214 * instruction. It is only relevant if is_int is TRUE.
1215 */
1216void do_interrupt(int intno, int is_int, int error_code,
1217 target_ulong next_eip, int is_hw)
1218{
8fec2b8c 1219 if (qemu_loglevel_mask(CPU_LOG_INT)) {
eaa728ee
FB
1220 if ((env->cr[0] & CR0_PE_MASK)) {
1221 static int count;
93fcfe39 1222 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
eaa728ee
FB
1223 count, intno, error_code, is_int,
1224 env->hflags & HF_CPL_MASK,
1225 env->segs[R_CS].selector, EIP,
1226 (int)env->segs[R_CS].base + EIP,
1227 env->segs[R_SS].selector, ESP);
1228 if (intno == 0x0e) {
93fcfe39 1229 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
eaa728ee 1230 } else {
93fcfe39 1231 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
eaa728ee 1232 }
93fcfe39
AL
1233 qemu_log("\n");
1234 log_cpu_state(env, X86_DUMP_CCOP);
eaa728ee
FB
1235#if 0
1236 {
1237 int i;
9bd5494e 1238 target_ulong ptr;
93fcfe39 1239 qemu_log(" code=");
eaa728ee
FB
1240 ptr = env->segs[R_CS].base + env->eip;
1241 for(i = 0; i < 16; i++) {
93fcfe39 1242 qemu_log(" %02x", ldub(ptr + i));
eaa728ee 1243 }
93fcfe39 1244 qemu_log("\n");
eaa728ee
FB
1245 }
1246#endif
1247 count++;
1248 }
1249 }
1250 if (env->cr[0] & CR0_PE_MASK) {
00ea18d1 1251#if !defined(CONFIG_USER_ONLY)
2ed51f5b
AL
1252 if (env->hflags & HF_SVMI_MASK)
1253 handle_even_inj(intno, is_int, error_code, is_hw, 0);
00ea18d1 1254#endif
eb38c52c 1255#ifdef TARGET_X86_64
eaa728ee
FB
1256 if (env->hflags & HF_LMA_MASK) {
1257 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1258 } else
1259#endif
1260 {
1261 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1262 }
1263 } else {
00ea18d1 1264#if !defined(CONFIG_USER_ONLY)
2ed51f5b
AL
1265 if (env->hflags & HF_SVMI_MASK)
1266 handle_even_inj(intno, is_int, error_code, is_hw, 1);
00ea18d1 1267#endif
eaa728ee
FB
1268 do_interrupt_real(intno, is_int, error_code, next_eip);
1269 }
2ed51f5b 1270
00ea18d1 1271#if !defined(CONFIG_USER_ONLY)
2ed51f5b
AL
1272 if (env->hflags & HF_SVMI_MASK) {
1273 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1274 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
1275 }
00ea18d1 1276#endif
eaa728ee
FB
1277}
1278
f55761a0
AL
1279/* This should come from sysemu.h - if we could include it here... */
1280void qemu_system_reset_request(void);
1281
eaa728ee
FB
1282/*
1283 * Check nested exceptions and change to double or triple fault if
1284 * needed. It should only be called, if this is not an interrupt.
1285 * Returns the new exception number.
1286 */
1287static int check_exception(int intno, int *error_code)
1288{
1289 int first_contributory = env->old_exception == 0 ||
1290 (env->old_exception >= 10 &&
1291 env->old_exception <= 13);
1292 int second_contributory = intno == 0 ||
1293 (intno >= 10 && intno <= 13);
1294
93fcfe39 1295 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
eaa728ee
FB
1296 env->old_exception, intno);
1297
f55761a0
AL
1298#if !defined(CONFIG_USER_ONLY)
1299 if (env->old_exception == EXCP08_DBLE) {
1300 if (env->hflags & HF_SVMI_MASK)
1301 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1302
680c3069 1303 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
f55761a0
AL
1304
1305 qemu_system_reset_request();
1306 return EXCP_HLT;
1307 }
1308#endif
eaa728ee
FB
1309
1310 if ((first_contributory && second_contributory)
1311 || (env->old_exception == EXCP0E_PAGE &&
1312 (second_contributory || (intno == EXCP0E_PAGE)))) {
1313 intno = EXCP08_DBLE;
1314 *error_code = 0;
1315 }
1316
1317 if (second_contributory || (intno == EXCP0E_PAGE) ||
1318 (intno == EXCP08_DBLE))
1319 env->old_exception = intno;
1320
1321 return intno;
1322}
1323
1324/*
1325 * Signal an interruption. It is executed in the main CPU loop.
1326 * is_int is TRUE if coming from the int instruction. next_eip is the
1327 * EIP value AFTER the interrupt instruction. It is only relevant if
1328 * is_int is TRUE.
1329 */
a5e50b26 1330static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1331 int next_eip_addend)
eaa728ee
FB
1332{
1333 if (!is_int) {
1334 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1335 intno = check_exception(intno, &error_code);
872929aa
FB
1336 } else {
1337 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
eaa728ee
FB
1338 }
1339
1340 env->exception_index = intno;
1341 env->error_code = error_code;
1342 env->exception_is_int = is_int;
1343 env->exception_next_eip = env->eip + next_eip_addend;
1344 cpu_loop_exit();
1345}
1346
eaa728ee
FB
/* shortcuts to generate exceptions */

/* Raise an exception that carries an error code (e.g. #GP, #PF). */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1353
/* Raise an exception without an error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1358
63a54736
JW
1359void raise_exception_env(int exception_index, CPUState *nenv)
1360{
1361 env = nenv;
1362 raise_exception(exception_index);
1363}
eaa728ee
FB
1364/* SMM support */
1365
1366#if defined(CONFIG_USER_ONLY)
1367
/* SMM entry: no-op stub for user-mode emulation (no SMM there). */
void do_smm_enter(void)
{
}
1371
/* RSM: no-op stub for user-mode emulation (no SMM there). */
void helper_rsm(void)
{
}
1375
1376#else
1377
1378#ifdef TARGET_X86_64
1379#define SMM_REVISION_ID 0x00020064
1380#else
1381#define SMM_REVISION_ID 0x00020000
1382#endif
1383
1384void do_smm_enter(void)
1385{
1386 target_ulong sm_state;
1387 SegmentCache *dt;
1388 int i, offset;
1389
93fcfe39
AL
1390 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1391 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
eaa728ee
FB
1392
1393 env->hflags |= HF_SMM_MASK;
1394 cpu_smm_update(env);
1395
1396 sm_state = env->smbase + 0x8000;
1397
1398#ifdef TARGET_X86_64
1399 for(i = 0; i < 6; i++) {
1400 dt = &env->segs[i];
1401 offset = 0x7e00 + i * 16;
1402 stw_phys(sm_state + offset, dt->selector);
1403 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1404 stl_phys(sm_state + offset + 4, dt->limit);
1405 stq_phys(sm_state + offset + 8, dt->base);
1406 }
1407
1408 stq_phys(sm_state + 0x7e68, env->gdt.base);
1409 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1410
1411 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1412 stq_phys(sm_state + 0x7e78, env->ldt.base);
1413 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1414 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1415
1416 stq_phys(sm_state + 0x7e88, env->idt.base);
1417 stl_phys(sm_state + 0x7e84, env->idt.limit);
1418
1419 stw_phys(sm_state + 0x7e90, env->tr.selector);
1420 stq_phys(sm_state + 0x7e98, env->tr.base);
1421 stl_phys(sm_state + 0x7e94, env->tr.limit);
1422 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1423
1424 stq_phys(sm_state + 0x7ed0, env->efer);
1425
1426 stq_phys(sm_state + 0x7ff8, EAX);
1427 stq_phys(sm_state + 0x7ff0, ECX);
1428 stq_phys(sm_state + 0x7fe8, EDX);
1429 stq_phys(sm_state + 0x7fe0, EBX);
1430 stq_phys(sm_state + 0x7fd8, ESP);
1431 stq_phys(sm_state + 0x7fd0, EBP);
1432 stq_phys(sm_state + 0x7fc8, ESI);
1433 stq_phys(sm_state + 0x7fc0, EDI);
1434 for(i = 8; i < 16; i++)
1435 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1436 stq_phys(sm_state + 0x7f78, env->eip);
1437 stl_phys(sm_state + 0x7f70, compute_eflags());
1438 stl_phys(sm_state + 0x7f68, env->dr[6]);
1439 stl_phys(sm_state + 0x7f60, env->dr[7]);
1440
1441 stl_phys(sm_state + 0x7f48, env->cr[4]);
1442 stl_phys(sm_state + 0x7f50, env->cr[3]);
1443 stl_phys(sm_state + 0x7f58, env->cr[0]);
1444
1445 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1446 stl_phys(sm_state + 0x7f00, env->smbase);
1447#else
1448 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1449 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1450 stl_phys(sm_state + 0x7ff4, compute_eflags());
1451 stl_phys(sm_state + 0x7ff0, env->eip);
1452 stl_phys(sm_state + 0x7fec, EDI);
1453 stl_phys(sm_state + 0x7fe8, ESI);
1454 stl_phys(sm_state + 0x7fe4, EBP);
1455 stl_phys(sm_state + 0x7fe0, ESP);
1456 stl_phys(sm_state + 0x7fdc, EBX);
1457 stl_phys(sm_state + 0x7fd8, EDX);
1458 stl_phys(sm_state + 0x7fd4, ECX);
1459 stl_phys(sm_state + 0x7fd0, EAX);
1460 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1461 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1462
1463 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1464 stl_phys(sm_state + 0x7f64, env->tr.base);
1465 stl_phys(sm_state + 0x7f60, env->tr.limit);
1466 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1467
1468 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1469 stl_phys(sm_state + 0x7f80, env->ldt.base);
1470 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1471 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1472
1473 stl_phys(sm_state + 0x7f74, env->gdt.base);
1474 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1475
1476 stl_phys(sm_state + 0x7f58, env->idt.base);
1477 stl_phys(sm_state + 0x7f54, env->idt.limit);
1478
1479 for(i = 0; i < 6; i++) {
1480 dt = &env->segs[i];
1481 if (i < 3)
1482 offset = 0x7f84 + i * 12;
1483 else
1484 offset = 0x7f2c + (i - 3) * 12;
1485 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1486 stl_phys(sm_state + offset + 8, dt->base);
1487 stl_phys(sm_state + offset + 4, dt->limit);
1488 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1489 }
1490 stl_phys(sm_state + 0x7f14, env->cr[4]);
1491
1492 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1493 stl_phys(sm_state + 0x7ef8, env->smbase);
1494#endif
1495 /* init SMM cpu state */
1496
1497#ifdef TARGET_X86_64
5efc27bb 1498 cpu_load_efer(env, 0);
eaa728ee
FB
1499#endif
1500 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1501 env->eip = 0x00008000;
1502 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1503 0xffffffff, 0);
1504 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1505 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1506 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1507 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1508 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1509
1510 cpu_x86_update_cr0(env,
1511 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1512 cpu_x86_update_cr4(env, 0);
1513 env->dr[7] = 0x00000400;
1514 CC_OP = CC_OP_EFLAGS;
1515}
1516
1517void helper_rsm(void)
1518{
1519 target_ulong sm_state;
1520 int i, offset;
1521 uint32_t val;
1522
1523 sm_state = env->smbase + 0x8000;
1524#ifdef TARGET_X86_64
5efc27bb 1525 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
eaa728ee
FB
1526
1527 for(i = 0; i < 6; i++) {
1528 offset = 0x7e00 + i * 16;
1529 cpu_x86_load_seg_cache(env, i,
1530 lduw_phys(sm_state + offset),
1531 ldq_phys(sm_state + offset + 8),
1532 ldl_phys(sm_state + offset + 4),
1533 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1534 }
1535
1536 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1537 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1538
1539 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1540 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1541 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1542 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1543
1544 env->idt.base = ldq_phys(sm_state + 0x7e88);
1545 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1546
1547 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1548 env->tr.base = ldq_phys(sm_state + 0x7e98);
1549 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1550 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1551
1552 EAX = ldq_phys(sm_state + 0x7ff8);
1553 ECX = ldq_phys(sm_state + 0x7ff0);
1554 EDX = ldq_phys(sm_state + 0x7fe8);
1555 EBX = ldq_phys(sm_state + 0x7fe0);
1556 ESP = ldq_phys(sm_state + 0x7fd8);
1557 EBP = ldq_phys(sm_state + 0x7fd0);
1558 ESI = ldq_phys(sm_state + 0x7fc8);
1559 EDI = ldq_phys(sm_state + 0x7fc0);
1560 for(i = 8; i < 16; i++)
1561 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1562 env->eip = ldq_phys(sm_state + 0x7f78);
1563 load_eflags(ldl_phys(sm_state + 0x7f70),
1564 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1565 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1566 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1567
1568 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1569 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1570 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1571
1572 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1573 if (val & 0x20000) {
1574 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1575 }
1576#else
1577 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1578 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1579 load_eflags(ldl_phys(sm_state + 0x7ff4),
1580 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1581 env->eip = ldl_phys(sm_state + 0x7ff0);
1582 EDI = ldl_phys(sm_state + 0x7fec);
1583 ESI = ldl_phys(sm_state + 0x7fe8);
1584 EBP = ldl_phys(sm_state + 0x7fe4);
1585 ESP = ldl_phys(sm_state + 0x7fe0);
1586 EBX = ldl_phys(sm_state + 0x7fdc);
1587 EDX = ldl_phys(sm_state + 0x7fd8);
1588 ECX = ldl_phys(sm_state + 0x7fd4);
1589 EAX = ldl_phys(sm_state + 0x7fd0);
1590 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1591 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1592
1593 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1594 env->tr.base = ldl_phys(sm_state + 0x7f64);
1595 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1596 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1597
1598 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1599 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1600 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1601 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1602
1603 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1604 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1605
1606 env->idt.base = ldl_phys(sm_state + 0x7f58);
1607 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1608
1609 for(i = 0; i < 6; i++) {
1610 if (i < 3)
1611 offset = 0x7f84 + i * 12;
1612 else
1613 offset = 0x7f2c + (i - 3) * 12;
1614 cpu_x86_load_seg_cache(env, i,
1615 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1616 ldl_phys(sm_state + offset + 8),
1617 ldl_phys(sm_state + offset + 4),
1618 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1619 }
1620 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1621
1622 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1623 if (val & 0x20000) {
1624 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1625 }
1626#endif
1627 CC_OP = CC_OP_EFLAGS;
1628 env->hflags &= ~HF_SMM_MASK;
1629 cpu_smm_update(env);
1630
93fcfe39
AL
1631 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1632 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
eaa728ee
FB
1633}
1634
1635#endif /* !CONFIG_USER_ONLY */
1636
1637
1638/* division, flags are undefined */
1639
1640void helper_divb_AL(target_ulong t0)
1641{
1642 unsigned int num, den, q, r;
1643
1644 num = (EAX & 0xffff);
1645 den = (t0 & 0xff);
1646 if (den == 0) {
1647 raise_exception(EXCP00_DIVZ);
1648 }
1649 q = (num / den);
1650 if (q > 0xff)
1651 raise_exception(EXCP00_DIVZ);
1652 q &= 0xff;
1653 r = (num % den) & 0xff;
1654 EAX = (EAX & ~0xffff) | (r << 8) | q;
1655}
1656
1657void helper_idivb_AL(target_ulong t0)
1658{
1659 int num, den, q, r;
1660
1661 num = (int16_t)EAX;
1662 den = (int8_t)t0;
1663 if (den == 0) {
1664 raise_exception(EXCP00_DIVZ);
1665 }
1666 q = (num / den);
1667 if (q != (int8_t)q)
1668 raise_exception(EXCP00_DIVZ);
1669 q &= 0xff;
1670 r = (num % den) & 0xff;
1671 EAX = (EAX & ~0xffff) | (r << 8) | q;
1672}
1673
1674void helper_divw_AX(target_ulong t0)
1675{
1676 unsigned int num, den, q, r;
1677
1678 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1679 den = (t0 & 0xffff);
1680 if (den == 0) {
1681 raise_exception(EXCP00_DIVZ);
1682 }
1683 q = (num / den);
1684 if (q > 0xffff)
1685 raise_exception(EXCP00_DIVZ);
1686 q &= 0xffff;
1687 r = (num % den) & 0xffff;
1688 EAX = (EAX & ~0xffff) | q;
1689 EDX = (EDX & ~0xffff) | r;
1690}
1691
1692void helper_idivw_AX(target_ulong t0)
1693{
1694 int num, den, q, r;
1695
1696 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1697 den = (int16_t)t0;
1698 if (den == 0) {
1699 raise_exception(EXCP00_DIVZ);
1700 }
1701 q = (num / den);
1702 if (q != (int16_t)q)
1703 raise_exception(EXCP00_DIVZ);
1704 q &= 0xffff;
1705 r = (num % den) & 0xffff;
1706 EAX = (EAX & ~0xffff) | q;
1707 EDX = (EDX & ~0xffff) | r;
1708}
1709
1710void helper_divl_EAX(target_ulong t0)
1711{
1712 unsigned int den, r;
1713 uint64_t num, q;
1714
1715 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1716 den = t0;
1717 if (den == 0) {
1718 raise_exception(EXCP00_DIVZ);
1719 }
1720 q = (num / den);
1721 r = (num % den);
1722 if (q > 0xffffffff)
1723 raise_exception(EXCP00_DIVZ);
1724 EAX = (uint32_t)q;
1725 EDX = (uint32_t)r;
1726}
1727
1728void helper_idivl_EAX(target_ulong t0)
1729{
1730 int den, r;
1731 int64_t num, q;
1732
1733 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1734 den = t0;
1735 if (den == 0) {
1736 raise_exception(EXCP00_DIVZ);
1737 }
1738 q = (num / den);
1739 r = (num % den);
1740 if (q != (int32_t)q)
1741 raise_exception(EXCP00_DIVZ);
1742 EAX = (uint32_t)q;
1743 EDX = (uint32_t)r;
1744}
1745
1746/* bcd */
1747
1748/* XXX: exception */
1749void helper_aam(int base)
1750{
1751 int al, ah;
1752 al = EAX & 0xff;
1753 ah = al / base;
1754 al = al % base;
1755 EAX = (EAX & ~0xffff) | al | (ah << 8);
1756 CC_DST = al;
1757}
1758
1759void helper_aad(int base)
1760{
1761 int al, ah;
1762 al = EAX & 0xff;
1763 ah = (EAX >> 8) & 0xff;
1764 al = ((ah * base) + al) & 0xff;
1765 EAX = (EAX & ~0xffff) | al;
1766 CC_DST = al;
1767}
1768
1769void helper_aaa(void)
1770{
1771 int icarry;
1772 int al, ah, af;
1773 int eflags;
1774
a7812ae4 1775 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1776 af = eflags & CC_A;
1777 al = EAX & 0xff;
1778 ah = (EAX >> 8) & 0xff;
1779
1780 icarry = (al > 0xf9);
1781 if (((al & 0x0f) > 9 ) || af) {
1782 al = (al + 6) & 0x0f;
1783 ah = (ah + 1 + icarry) & 0xff;
1784 eflags |= CC_C | CC_A;
1785 } else {
1786 eflags &= ~(CC_C | CC_A);
1787 al &= 0x0f;
1788 }
1789 EAX = (EAX & ~0xffff) | al | (ah << 8);
1790 CC_SRC = eflags;
eaa728ee
FB
1791}
1792
1793void helper_aas(void)
1794{
1795 int icarry;
1796 int al, ah, af;
1797 int eflags;
1798
a7812ae4 1799 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1800 af = eflags & CC_A;
1801 al = EAX & 0xff;
1802 ah = (EAX >> 8) & 0xff;
1803
1804 icarry = (al < 6);
1805 if (((al & 0x0f) > 9 ) || af) {
1806 al = (al - 6) & 0x0f;
1807 ah = (ah - 1 - icarry) & 0xff;
1808 eflags |= CC_C | CC_A;
1809 } else {
1810 eflags &= ~(CC_C | CC_A);
1811 al &= 0x0f;
1812 }
1813 EAX = (EAX & ~0xffff) | al | (ah << 8);
1814 CC_SRC = eflags;
eaa728ee
FB
1815}
1816
1817void helper_daa(void)
1818{
1819 int al, af, cf;
1820 int eflags;
1821
a7812ae4 1822 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1823 cf = eflags & CC_C;
1824 af = eflags & CC_A;
1825 al = EAX & 0xff;
1826
1827 eflags = 0;
1828 if (((al & 0x0f) > 9 ) || af) {
1829 al = (al + 6) & 0xff;
1830 eflags |= CC_A;
1831 }
1832 if ((al > 0x9f) || cf) {
1833 al = (al + 0x60) & 0xff;
1834 eflags |= CC_C;
1835 }
1836 EAX = (EAX & ~0xff) | al;
1837 /* well, speed is not an issue here, so we compute the flags by hand */
1838 eflags |= (al == 0) << 6; /* zf */
1839 eflags |= parity_table[al]; /* pf */
1840 eflags |= (al & 0x80); /* sf */
1841 CC_SRC = eflags;
eaa728ee
FB
1842}
1843
1844void helper_das(void)
1845{
1846 int al, al1, af, cf;
1847 int eflags;
1848
a7812ae4 1849 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1850 cf = eflags & CC_C;
1851 af = eflags & CC_A;
1852 al = EAX & 0xff;
1853
1854 eflags = 0;
1855 al1 = al;
1856 if (((al & 0x0f) > 9 ) || af) {
1857 eflags |= CC_A;
1858 if (al < 6 || cf)
1859 eflags |= CC_C;
1860 al = (al - 6) & 0xff;
1861 }
1862 if ((al1 > 0x99) || cf) {
1863 al = (al - 0x60) & 0xff;
1864 eflags |= CC_C;
1865 }
1866 EAX = (EAX & ~0xff) | al;
1867 /* well, speed is not an issue here, so we compute the flags by hand */
1868 eflags |= (al == 0) << 6; /* zf */
1869 eflags |= parity_table[al]; /* pf */
1870 eflags |= (al & 0x80); /* sf */
1871 CC_SRC = eflags;
eaa728ee
FB
1872}
1873
1874void helper_into(int next_eip_addend)
1875{
1876 int eflags;
a7812ae4 1877 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1878 if (eflags & CC_O) {
1879 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1880 }
1881}
1882
1883void helper_cmpxchg8b(target_ulong a0)
1884{
1885 uint64_t d;
1886 int eflags;
1887
a7812ae4 1888 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1889 d = ldq(a0);
1890 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1891 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1892 eflags |= CC_Z;
1893 } else {
278ed7c3
FB
1894 /* always do the store */
1895 stq(a0, d);
eaa728ee
FB
1896 EDX = (uint32_t)(d >> 32);
1897 EAX = (uint32_t)d;
1898 eflags &= ~CC_Z;
1899 }
1900 CC_SRC = eflags;
1901}
1902
#ifdef TARGET_X86_64
/* CMPXCHG16B: 128-bit compare-exchange of RDX:RAX against m128.
   #GP if the operand is not 16-byte aligned. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0) {
        raise_exception(EXCP0D_GPF);
    }
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#endif
1929
1930void helper_single_step(void)
1931{
01df040b
AL
1932#ifndef CONFIG_USER_ONLY
1933 check_hw_breakpoints(env, 1);
1934 env->dr[6] |= DR6_BS;
1935#endif
1936 raise_exception(EXCP01_DB);
eaa728ee
FB
1937}
1938
1939void helper_cpuid(void)
1940{
6fd805e1 1941 uint32_t eax, ebx, ecx, edx;
eaa728ee 1942
872929aa 1943 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
e737b32a 1944
e00b6f80 1945 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
6fd805e1
AL
1946 EAX = eax;
1947 EBX = ebx;
1948 ECX = ecx;
1949 EDX = edx;
eaa728ee
FB
1950}
1951
1952void helper_enter_level(int level, int data32, target_ulong t1)
1953{
1954 target_ulong ssp;
1955 uint32_t esp_mask, esp, ebp;
1956
1957 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1958 ssp = env->segs[R_SS].base;
1959 ebp = EBP;
1960 esp = ESP;
1961 if (data32) {
1962 /* 32 bit */
1963 esp -= 4;
1964 while (--level) {
1965 esp -= 4;
1966 ebp -= 4;
1967 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1968 }
1969 esp -= 4;
1970 stl(ssp + (esp & esp_mask), t1);
1971 } else {
1972 /* 16 bit */
1973 esp -= 2;
1974 while (--level) {
1975 esp -= 2;
1976 ebp -= 2;
1977 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1978 }
1979 esp -= 2;
1980 stw(ssp + (esp & esp_mask), t1);
1981 }
1982}
1983
#ifdef TARGET_X86_64
/* 64-bit variant of helper_enter_level (flat stack, no SS base/mask). */
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
#endif
2014
2015void helper_lldt(int selector)
2016{
2017 SegmentCache *dt;
2018 uint32_t e1, e2;
2019 int index, entry_limit;
2020 target_ulong ptr;
2021
2022 selector &= 0xffff;
2023 if ((selector & 0xfffc) == 0) {
2024 /* XXX: NULL selector case: invalid LDT */
2025 env->ldt.base = 0;
2026 env->ldt.limit = 0;
2027 } else {
2028 if (selector & 0x4)
2029 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2030 dt = &env->gdt;
2031 index = selector & ~7;
2032#ifdef TARGET_X86_64
2033 if (env->hflags & HF_LMA_MASK)
2034 entry_limit = 15;
2035 else
2036#endif
2037 entry_limit = 7;
2038 if ((index + entry_limit) > dt->limit)
2039 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2040 ptr = dt->base + index;
2041 e1 = ldl_kernel(ptr);
2042 e2 = ldl_kernel(ptr + 4);
2043 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2044 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2045 if (!(e2 & DESC_P_MASK))
2046 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2047#ifdef TARGET_X86_64
2048 if (env->hflags & HF_LMA_MASK) {
2049 uint32_t e3;
2050 e3 = ldl_kernel(ptr + 8);
2051 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2052 env->ldt.base |= (target_ulong)e3 << 32;
2053 } else
2054#endif
2055 {
2056 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2057 }
2058 }
2059 env->ldt.selector = selector;
2060}
2061
2062void helper_ltr(int selector)
2063{
2064 SegmentCache *dt;
2065 uint32_t e1, e2;
2066 int index, type, entry_limit;
2067 target_ulong ptr;
2068
2069 selector &= 0xffff;
2070 if ((selector & 0xfffc) == 0) {
2071 /* NULL selector case: invalid TR */
2072 env->tr.base = 0;
2073 env->tr.limit = 0;
2074 env->tr.flags = 0;
2075 } else {
2076 if (selector & 0x4)
2077 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2078 dt = &env->gdt;
2079 index = selector & ~7;
2080#ifdef TARGET_X86_64
2081 if (env->hflags & HF_LMA_MASK)
2082 entry_limit = 15;
2083 else
2084#endif
2085 entry_limit = 7;
2086 if ((index + entry_limit) > dt->limit)
2087 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2088 ptr = dt->base + index;
2089 e1 = ldl_kernel(ptr);
2090 e2 = ldl_kernel(ptr + 4);
2091 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2092 if ((e2 & DESC_S_MASK) ||
2093 (type != 1 && type != 9))
2094 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2095 if (!(e2 & DESC_P_MASK))
2096 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2097#ifdef TARGET_X86_64
2098 if (env->hflags & HF_LMA_MASK) {
2099 uint32_t e3, e4;
2100 e3 = ldl_kernel(ptr + 8);
2101 e4 = ldl_kernel(ptr + 12);
2102 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2103 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2104 load_seg_cache_raw_dt(&env->tr, e1, e2);
2105 env->tr.base |= (target_ulong)e3 << 32;
2106 } else
2107#endif
2108 {
2109 load_seg_cache_raw_dt(&env->tr, e1, e2);
2110 }
2111 e2 |= DESC_TSS_BUSY_MASK;
2112 stl_kernel(ptr + 4, e2);
2113 }
2114 env->tr.selector = selector;
2115}
2116
2117/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2118void helper_load_seg(int seg_reg, int selector)
2119{
2120 uint32_t e1, e2;
2121 int cpl, dpl, rpl;
2122 SegmentCache *dt;
2123 int index;
2124 target_ulong ptr;
2125
2126 selector &= 0xffff;
2127 cpl = env->hflags & HF_CPL_MASK;
2128 if ((selector & 0xfffc) == 0) {
2129 /* null selector case */
2130 if (seg_reg == R_SS
2131#ifdef TARGET_X86_64
2132 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2133#endif
2134 )
2135 raise_exception_err(EXCP0D_GPF, 0);
2136 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2137 } else {
2138
2139 if (selector & 0x4)
2140 dt = &env->ldt;
2141 else
2142 dt = &env->gdt;
2143 index = selector & ~7;
2144 if ((index + 7) > dt->limit)
2145 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2146 ptr = dt->base + index;
2147 e1 = ldl_kernel(ptr);
2148 e2 = ldl_kernel(ptr + 4);
2149
2150 if (!(e2 & DESC_S_MASK))
2151 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2152 rpl = selector & 3;
2153 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2154 if (seg_reg == R_SS) {
2155 /* must be writable segment */
2156 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2157 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2158 if (rpl != cpl || dpl != cpl)
2159 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2160 } else {
2161 /* must be readable segment */
2162 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2163 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2164
2165 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2166 /* if not conforming code, test rights */
2167 if (dpl < cpl || dpl < rpl)
2168 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2169 }
2170 }
2171
2172 if (!(e2 & DESC_P_MASK)) {
2173 if (seg_reg == R_SS)
2174 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2175 else
2176 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2177 }
2178
2179 /* set the access bit if not already set */
2180 if (!(e2 & DESC_A_MASK)) {
2181 e2 |= DESC_A_MASK;
2182 stl_kernel(ptr + 4, e2);
2183 }
2184
2185 cpu_x86_load_seg_cache(env, seg_reg, selector,
2186 get_seg_base(e1, e2),
2187 get_seg_limit(e1, e2),
2188 e2);
2189#if 0
93fcfe39 2190 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
eaa728ee
FB
2191 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2192#endif
2193 }
2194}
2195
2196/* protected mode jump */
2197void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2198 int next_eip_addend)
2199{
2200 int gate_cs, type;
2201 uint32_t e1, e2, cpl, dpl, rpl, limit;
2202 target_ulong next_eip;
2203
2204 if ((new_cs & 0xfffc) == 0)
2205 raise_exception_err(EXCP0D_GPF, 0);
2206 if (load_segment(&e1, &e2, new_cs) != 0)
2207 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2208 cpl = env->hflags & HF_CPL_MASK;
2209 if (e2 & DESC_S_MASK) {
2210 if (!(e2 & DESC_CS_MASK))
2211 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2212 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2213 if (e2 & DESC_C_MASK) {
2214 /* conforming code segment */
2215 if (dpl > cpl)
2216 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2217 } else {
2218 /* non conforming code segment */
2219 rpl = new_cs & 3;
2220 if (rpl > cpl)
2221 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2222 if (dpl != cpl)
2223 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2224 }
2225 if (!(e2 & DESC_P_MASK))
2226 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2227 limit = get_seg_limit(e1, e2);
2228 if (new_eip > limit &&
2229 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2230 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2231 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2232 get_seg_base(e1, e2), limit, e2);
2233 EIP = new_eip;
2234 } else {
2235 /* jump to call or task gate */
2236 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2237 rpl = new_cs & 3;
2238 cpl = env->hflags & HF_CPL_MASK;
2239 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2240 switch(type) {
2241 case 1: /* 286 TSS */
2242 case 9: /* 386 TSS */
2243 case 5: /* task gate */
2244 if (dpl < cpl || dpl < rpl)
2245 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2246 next_eip = env->eip + next_eip_addend;
2247 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2248 CC_OP = CC_OP_EFLAGS;
2249 break;
2250 case 4: /* 286 call gate */
2251 case 12: /* 386 call gate */
2252 if ((dpl < cpl) || (dpl < rpl))
2253 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2254 if (!(e2 & DESC_P_MASK))
2255 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2256 gate_cs = e1 >> 16;
2257 new_eip = (e1 & 0xffff);
2258 if (type == 12)
2259 new_eip |= (e2 & 0xffff0000);
2260 if (load_segment(&e1, &e2, gate_cs) != 0)
2261 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2262 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2263 /* must be code segment */
2264 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2265 (DESC_S_MASK | DESC_CS_MASK)))
2266 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2267 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2268 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2269 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2270 if (!(e2 & DESC_P_MASK))
2271 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2272 limit = get_seg_limit(e1, e2);
2273 if (new_eip > limit)
2274 raise_exception_err(EXCP0D_GPF, 0);
2275 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2276 get_seg_base(e1, e2), limit, e2);
2277 EIP = new_eip;
2278 break;
2279 default:
2280 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2281 break;
2282 }
2283 }
2284}
2285
2286/* real mode call */
2287void helper_lcall_real(int new_cs, target_ulong new_eip1,
2288 int shift, int next_eip)
2289{
2290 int new_eip;
2291 uint32_t esp, esp_mask;
2292 target_ulong ssp;
2293
2294 new_eip = new_eip1;
2295 esp = ESP;
2296 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2297 ssp = env->segs[R_SS].base;
2298 if (shift) {
2299 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2300 PUSHL(ssp, esp, esp_mask, next_eip);
2301 } else {
2302 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2303 PUSHW(ssp, esp, esp_mask, next_eip);
2304 }
2305
2306 SET_ESP(esp, esp_mask);
2307 env->eip = new_eip;
2308 env->segs[R_CS].selector = new_cs;
2309 env->segs[R_CS].base = (new_cs << 4);
2310}
2311
2312/* protected mode call */
2313void helper_lcall_protected(int new_cs, target_ulong new_eip,
2314 int shift, int next_eip_addend)
2315{
2316 int new_stack, i;
2317 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1c918eba 2318 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
eaa728ee
FB
2319 uint32_t val, limit, old_sp_mask;
2320 target_ulong ssp, old_ssp, next_eip;
2321
2322 next_eip = env->eip + next_eip_addend;
d12d51d5
AL
2323 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2324 LOG_PCALL_STATE(env);
eaa728ee
FB
2325 if ((new_cs & 0xfffc) == 0)
2326 raise_exception_err(EXCP0D_GPF, 0);
2327 if (load_segment(&e1, &e2, new_cs) != 0)
2328 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2329 cpl = env->hflags & HF_CPL_MASK;
d12d51d5 2330 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
eaa728ee
FB
2331 if (e2 & DESC_S_MASK) {
2332 if (!(e2 & DESC_CS_MASK))
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2334 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2335 if (e2 & DESC_C_MASK) {
2336 /* conforming code segment */
2337 if (dpl > cpl)
2338 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 } else {
2340 /* non conforming code segment */
2341 rpl = new_cs & 3;
2342 if (rpl > cpl)
2343 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2344 if (dpl != cpl)
2345 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2346 }
2347 if (!(e2 & DESC_P_MASK))
2348 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2349
2350#ifdef TARGET_X86_64
2351 /* XXX: check 16/32 bit cases in long mode */
2352 if (shift == 2) {
2353 target_ulong rsp;
2354 /* 64 bit case */
2355 rsp = ESP;
2356 PUSHQ(rsp, env->segs[R_CS].selector);
2357 PUSHQ(rsp, next_eip);
2358 /* from this point, not restartable */
2359 ESP = rsp;
2360 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2361 get_seg_base(e1, e2),
2362 get_seg_limit(e1, e2), e2);
2363 EIP = new_eip;
2364 } else
2365#endif
2366 {
2367 sp = ESP;
2368 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2369 ssp = env->segs[R_SS].base;
2370 if (shift) {
2371 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2372 PUSHL(ssp, sp, sp_mask, next_eip);
2373 } else {
2374 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2375 PUSHW(ssp, sp, sp_mask, next_eip);
2376 }
2377
2378 limit = get_seg_limit(e1, e2);
2379 if (new_eip > limit)
2380 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2381 /* from this point, not restartable */
2382 SET_ESP(sp, sp_mask);
2383 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2384 get_seg_base(e1, e2), limit, e2);
2385 EIP = new_eip;
2386 }
2387 } else {
2388 /* check gate type */
2389 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2390 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2391 rpl = new_cs & 3;
2392 switch(type) {
2393 case 1: /* available 286 TSS */
2394 case 9: /* available 386 TSS */
2395 case 5: /* task gate */
2396 if (dpl < cpl || dpl < rpl)
2397 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2398 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2399 CC_OP = CC_OP_EFLAGS;
2400 return;
2401 case 4: /* 286 call gate */
2402 case 12: /* 386 call gate */
2403 break;
2404 default:
2405 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2406 break;
2407 }
2408 shift = type >> 3;
2409
2410 if (dpl < cpl || dpl < rpl)
2411 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2412 /* check valid bit */
2413 if (!(e2 & DESC_P_MASK))
2414 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2415 selector = e1 >> 16;
2416 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2417 param_count = e2 & 0x1f;
2418 if ((selector & 0xfffc) == 0)
2419 raise_exception_err(EXCP0D_GPF, 0);
2420
2421 if (load_segment(&e1, &e2, selector) != 0)
2422 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2423 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2424 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2425 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2426 if (dpl > cpl)
2427 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2428 if (!(e2 & DESC_P_MASK))
2429 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2430
2431 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2432 /* to inner privilege */
2433 get_ss_esp_from_tss(&ss, &sp, dpl);
d12d51d5 2434 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
eaa728ee 2435 ss, sp, param_count, ESP);
eaa728ee
FB
2436 if ((ss & 0xfffc) == 0)
2437 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2438 if ((ss & 3) != dpl)
2439 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2440 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2441 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2442 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2443 if (ss_dpl != dpl)
2444 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2445 if (!(ss_e2 & DESC_S_MASK) ||
2446 (ss_e2 & DESC_CS_MASK) ||
2447 !(ss_e2 & DESC_W_MASK))
2448 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2449 if (!(ss_e2 & DESC_P_MASK))
2450 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2451
2452 // push_size = ((param_count * 2) + 8) << shift;
2453
2454 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2455 old_ssp = env->segs[R_SS].base;
2456
2457 sp_mask = get_sp_mask(ss_e2);
2458 ssp = get_seg_base(ss_e1, ss_e2);
2459 if (shift) {
2460 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2461 PUSHL(ssp, sp, sp_mask, ESP);
2462 for(i = param_count - 1; i >= 0; i--) {
2463 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2464 PUSHL(ssp, sp, sp_mask, val);
2465 }
2466 } else {
2467 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2468 PUSHW(ssp, sp, sp_mask, ESP);
2469 for(i = param_count - 1; i >= 0; i--) {
2470 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2471 PUSHW(ssp, sp, sp_mask, val);
2472 }
2473 }
2474 new_stack = 1;
2475 } else {
2476 /* to same privilege */
2477 sp = ESP;
2478 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2479 ssp = env->segs[R_SS].base;
2480 // push_size = (4 << shift);
2481 new_stack = 0;
2482 }
2483
2484 if (shift) {
2485 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2486 PUSHL(ssp, sp, sp_mask, next_eip);
2487 } else {
2488 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2489 PUSHW(ssp, sp, sp_mask, next_eip);
2490 }
2491
2492 /* from this point, not restartable */
2493
2494 if (new_stack) {
2495 ss = (ss & ~3) | dpl;
2496 cpu_x86_load_seg_cache(env, R_SS, ss,
2497 ssp,
2498 get_seg_limit(ss_e1, ss_e2),
2499 ss_e2);
2500 }
2501
2502 selector = (selector & ~3) | dpl;
2503 cpu_x86_load_seg_cache(env, R_CS, selector,
2504 get_seg_base(e1, e2),
2505 get_seg_limit(e1, e2),
2506 e2);
2507 cpu_x86_set_cpl(env, dpl);
2508 SET_ESP(sp, sp_mask);
2509 EIP = offset;
2510 }
eaa728ee
FB
2511}
2512
2513/* real and vm86 mode iret */
2514void helper_iret_real(int shift)
2515{
2516 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2517 target_ulong ssp;
2518 int eflags_mask;
2519
2520 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2521 sp = ESP;
2522 ssp = env->segs[R_SS].base;
2523 if (shift == 1) {
2524 /* 32 bits */
2525 POPL(ssp, sp, sp_mask, new_eip);
2526 POPL(ssp, sp, sp_mask, new_cs);
2527 new_cs &= 0xffff;
2528 POPL(ssp, sp, sp_mask, new_eflags);
2529 } else {
2530 /* 16 bits */
2531 POPW(ssp, sp, sp_mask, new_eip);
2532 POPW(ssp, sp, sp_mask, new_cs);
2533 POPW(ssp, sp, sp_mask, new_eflags);
2534 }
2535 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2536 env->segs[R_CS].selector = new_cs;
2537 env->segs[R_CS].base = (new_cs << 4);
eaa728ee
FB
2538 env->eip = new_eip;
2539 if (env->eflags & VM_MASK)
2540 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2541 else
2542 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2543 if (shift == 0)
2544 eflags_mask &= 0xffff;
2545 load_eflags(new_eflags, eflags_mask);
db620f46 2546 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2547}
2548
2549static inline void validate_seg(int seg_reg, int cpl)
2550{
2551 int dpl;
2552 uint32_t e2;
2553
2554 /* XXX: on x86_64, we do not want to nullify FS and GS because
2555 they may still contain a valid base. I would be interested to
2556 know how a real x86_64 CPU behaves */
2557 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2558 (env->segs[seg_reg].selector & 0xfffc) == 0)
2559 return;
2560
2561 e2 = env->segs[seg_reg].flags;
2562 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2563 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2564 /* data or non conforming code segment */
2565 if (dpl < cpl) {
2566 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2567 }
2568 }
2569}
2570
2571/* protected mode iret */
2572static inline void helper_ret_protected(int shift, int is_iret, int addend)
2573{
2574 uint32_t new_cs, new_eflags, new_ss;
2575 uint32_t new_es, new_ds, new_fs, new_gs;
2576 uint32_t e1, e2, ss_e1, ss_e2;
2577 int cpl, dpl, rpl, eflags_mask, iopl;
2578 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2579
2580#ifdef TARGET_X86_64
2581 if (shift == 2)
2582 sp_mask = -1;
2583 else
2584#endif
2585 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2586 sp = ESP;
2587 ssp = env->segs[R_SS].base;
2588 new_eflags = 0; /* avoid warning */
2589#ifdef TARGET_X86_64
2590 if (shift == 2) {
2591 POPQ(sp, new_eip);
2592 POPQ(sp, new_cs);
2593 new_cs &= 0xffff;
2594 if (is_iret) {
2595 POPQ(sp, new_eflags);
2596 }
2597 } else
2598#endif
2599 if (shift == 1) {
2600 /* 32 bits */
2601 POPL(ssp, sp, sp_mask, new_eip);
2602 POPL(ssp, sp, sp_mask, new_cs);
2603 new_cs &= 0xffff;
2604 if (is_iret) {
2605 POPL(ssp, sp, sp_mask, new_eflags);
2606 if (new_eflags & VM_MASK)
2607 goto return_to_vm86;
2608 }
2609 } else {
2610 /* 16 bits */
2611 POPW(ssp, sp, sp_mask, new_eip);
2612 POPW(ssp, sp, sp_mask, new_cs);
2613 if (is_iret)
2614 POPW(ssp, sp, sp_mask, new_eflags);
2615 }
d12d51d5
AL
2616 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2617 new_cs, new_eip, shift, addend);
2618 LOG_PCALL_STATE(env);
eaa728ee
FB
2619 if ((new_cs & 0xfffc) == 0)
2620 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2621 if (load_segment(&e1, &e2, new_cs) != 0)
2622 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2623 if (!(e2 & DESC_S_MASK) ||
2624 !(e2 & DESC_CS_MASK))
2625 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2626 cpl = env->hflags & HF_CPL_MASK;
2627 rpl = new_cs & 3;
2628 if (rpl < cpl)
2629 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2630 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2631 if (e2 & DESC_C_MASK) {
2632 if (dpl > rpl)
2633 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2634 } else {
2635 if (dpl != rpl)
2636 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2637 }
2638 if (!(e2 & DESC_P_MASK))
2639 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2640
2641 sp += addend;
2642 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2643 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
1235fc06 2644 /* return to same privilege level */
eaa728ee
FB
2645 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2646 get_seg_base(e1, e2),
2647 get_seg_limit(e1, e2),
2648 e2);
2649 } else {
2650 /* return to different privilege level */
2651#ifdef TARGET_X86_64
2652 if (shift == 2) {
2653 POPQ(sp, new_esp);
2654 POPQ(sp, new_ss);
2655 new_ss &= 0xffff;
2656 } else
2657#endif
2658 if (shift == 1) {
2659 /* 32 bits */
2660 POPL(ssp, sp, sp_mask, new_esp);
2661 POPL(ssp, sp, sp_mask, new_ss);
2662 new_ss &= 0xffff;
2663 } else {
2664 /* 16 bits */
2665 POPW(ssp, sp, sp_mask, new_esp);
2666 POPW(ssp, sp, sp_mask, new_ss);
2667 }
d12d51d5 2668 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
eaa728ee 2669 new_ss, new_esp);
eaa728ee
FB
2670 if ((new_ss & 0xfffc) == 0) {
2671#ifdef TARGET_X86_64
2672 /* NULL ss is allowed in long mode if cpl != 3*/
2673 /* XXX: test CS64 ? */
2674 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2675 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2676 0, 0xffffffff,
2677 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2678 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2679 DESC_W_MASK | DESC_A_MASK);
2680 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2681 } else
2682#endif
2683 {
2684 raise_exception_err(EXCP0D_GPF, 0);
2685 }
2686 } else {
2687 if ((new_ss & 3) != rpl)
2688 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2689 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2690 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2691 if (!(ss_e2 & DESC_S_MASK) ||
2692 (ss_e2 & DESC_CS_MASK) ||
2693 !(ss_e2 & DESC_W_MASK))
2694 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2695 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2696 if (dpl != rpl)
2697 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2698 if (!(ss_e2 & DESC_P_MASK))
2699 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2700 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2701 get_seg_base(ss_e1, ss_e2),
2702 get_seg_limit(ss_e1, ss_e2),
2703 ss_e2);
2704 }
2705
2706 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2707 get_seg_base(e1, e2),
2708 get_seg_limit(e1, e2),
2709 e2);
2710 cpu_x86_set_cpl(env, rpl);
2711 sp = new_esp;
2712#ifdef TARGET_X86_64
2713 if (env->hflags & HF_CS64_MASK)
2714 sp_mask = -1;
2715 else
2716#endif
2717 sp_mask = get_sp_mask(ss_e2);
2718
2719 /* validate data segments */
2720 validate_seg(R_ES, rpl);
2721 validate_seg(R_DS, rpl);
2722 validate_seg(R_FS, rpl);
2723 validate_seg(R_GS, rpl);
2724
2725 sp += addend;
2726 }
2727 SET_ESP(sp, sp_mask);
2728 env->eip = new_eip;
2729 if (is_iret) {
2730 /* NOTE: 'cpl' is the _old_ CPL */
2731 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2732 if (cpl == 0)
2733 eflags_mask |= IOPL_MASK;
2734 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2735 if (cpl <= iopl)
2736 eflags_mask |= IF_MASK;
2737 if (shift == 0)
2738 eflags_mask &= 0xffff;
2739 load_eflags(new_eflags, eflags_mask);
2740 }
2741 return;
2742
2743 return_to_vm86:
2744 POPL(ssp, sp, sp_mask, new_esp);
2745 POPL(ssp, sp, sp_mask, new_ss);
2746 POPL(ssp, sp, sp_mask, new_es);
2747 POPL(ssp, sp, sp_mask, new_ds);
2748 POPL(ssp, sp, sp_mask, new_fs);
2749 POPL(ssp, sp, sp_mask, new_gs);
2750
2751 /* modify processor state */
2752 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2753 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2754 load_seg_vm(R_CS, new_cs & 0xffff);
2755 cpu_x86_set_cpl(env, 3);
2756 load_seg_vm(R_SS, new_ss & 0xffff);
2757 load_seg_vm(R_ES, new_es & 0xffff);
2758 load_seg_vm(R_DS, new_ds & 0xffff);
2759 load_seg_vm(R_FS, new_fs & 0xffff);
2760 load_seg_vm(R_GS, new_gs & 0xffff);
2761
2762 env->eip = new_eip & 0xffff;
2763 ESP = new_esp;
2764}
2765
2766void helper_iret_protected(int shift, int next_eip)
2767{
2768 int tss_selector, type;
2769 uint32_t e1, e2;
2770
2771 /* specific case for TSS */
2772 if (env->eflags & NT_MASK) {
2773#ifdef TARGET_X86_64
2774 if (env->hflags & HF_LMA_MASK)
2775 raise_exception_err(EXCP0D_GPF, 0);
2776#endif
2777 tss_selector = lduw_kernel(env->tr.base + 0);
2778 if (tss_selector & 4)
2779 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2780 if (load_segment(&e1, &e2, tss_selector) != 0)
2781 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2782 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2783 /* NOTE: we check both segment and busy TSS */
2784 if (type != 3)
2785 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2786 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2787 } else {
2788 helper_ret_protected(shift, 1, 0);
2789 }
db620f46 2790 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2791}
2792
/* Protected mode far RET (RETF [imm16]): thin wrapper over the common
   return path with is_iret == 0. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
2797
2798void helper_sysenter(void)
2799{
2800 if (env->sysenter_cs == 0) {
2801 raise_exception_err(EXCP0D_GPF, 0);
2802 }
2803 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2804 cpu_x86_set_cpl(env, 0);
2436b61a
AZ
2805
2806#ifdef TARGET_X86_64
2807 if (env->hflags & HF_LMA_MASK) {
2808 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2809 0, 0xffffffff,
2810 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2811 DESC_S_MASK |
2812 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2813 } else
2814#endif
2815 {
2816 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2817 0, 0xffffffff,
2818 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2819 DESC_S_MASK |
2820 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2821 }
eaa728ee
FB
2822 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2823 0, 0xffffffff,
2824 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2825 DESC_S_MASK |
2826 DESC_W_MASK | DESC_A_MASK);
2827 ESP = env->sysenter_esp;
2828 EIP = env->sysenter_eip;
2829}
2830
2436b61a 2831void helper_sysexit(int dflag)
eaa728ee
FB
2832{
2833 int cpl;
2834
2835 cpl = env->hflags & HF_CPL_MASK;
2836 if (env->sysenter_cs == 0 || cpl != 0) {
2837 raise_exception_err(EXCP0D_GPF, 0);
2838 }
2839 cpu_x86_set_cpl(env, 3);
2436b61a
AZ
2840#ifdef TARGET_X86_64
2841 if (dflag == 2) {
2842 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2843 0, 0xffffffff,
2844 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2845 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2846 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2847 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2848 0, 0xffffffff,
2849 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2850 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2851 DESC_W_MASK | DESC_A_MASK);
2852 } else
2853#endif
2854 {
2855 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2856 0, 0xffffffff,
2857 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2858 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2859 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2860 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2861 0, 0xffffffff,
2862 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2863 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2864 DESC_W_MASK | DESC_A_MASK);
2865 }
eaa728ee
FB
2866 ESP = ECX;
2867 EIP = EDX;
eaa728ee
FB
2868}
2869
872929aa
FB
2870#if defined(CONFIG_USER_ONLY)
2871target_ulong helper_read_crN(int reg)
eaa728ee 2872{
872929aa
FB
2873 return 0;
2874}
2875
2876void helper_write_crN(int reg, target_ulong t0)
2877{
2878}
01df040b
AL
2879
2880void helper_movl_drN_T0(int reg, target_ulong t0)
2881{
2882}
872929aa
FB
2883#else
2884target_ulong helper_read_crN(int reg)
2885{
2886 target_ulong val;
2887
2888 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2889 switch(reg) {
2890 default:
2891 val = env->cr[reg];
2892 break;
2893 case 8:
db620f46 2894 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 2895 val = cpu_get_apic_tpr(env->apic_state);
db620f46
FB
2896 } else {
2897 val = env->v_tpr;
2898 }
872929aa
FB
2899 break;
2900 }
2901 return val;
2902}
2903
2904void helper_write_crN(int reg, target_ulong t0)
2905{
2906 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
eaa728ee
FB
2907 switch(reg) {
2908 case 0:
2909 cpu_x86_update_cr0(env, t0);
2910 break;
2911 case 3:
2912 cpu_x86_update_cr3(env, t0);
2913 break;
2914 case 4:
2915 cpu_x86_update_cr4(env, t0);
2916 break;
2917 case 8:
db620f46 2918 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 2919 cpu_set_apic_tpr(env->apic_state, t0);
db620f46
FB
2920 }
2921 env->v_tpr = t0 & 0x0f;
eaa728ee
FB
2922 break;
2923 default:
2924 env->cr[reg] = t0;
2925 break;
2926 }
eaa728ee 2927}
01df040b
AL
2928
2929void helper_movl_drN_T0(int reg, target_ulong t0)
2930{
2931 int i;
2932
2933 if (reg < 4) {
2934 hw_breakpoint_remove(env, reg);
2935 env->dr[reg] = t0;
2936 hw_breakpoint_insert(env, reg);
2937 } else if (reg == 7) {
2938 for (i = 0; i < 4; i++)
2939 hw_breakpoint_remove(env, i);
2940 env->dr[7] = t0;
2941 for (i = 0; i < 4; i++)
2942 hw_breakpoint_insert(env, i);
2943 } else
2944 env->dr[reg] = t0;
2945}
872929aa 2946#endif
eaa728ee
FB
2947
2948void helper_lmsw(target_ulong t0)
2949{
2950 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2951 if already set to one. */
2952 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
872929aa 2953 helper_write_crN(0, t0);
eaa728ee
FB
2954}
2955
2956void helper_clts(void)
2957{
2958 env->cr[0] &= ~CR0_TS_MASK;
2959 env->hflags &= ~HF_TS_MASK;
2960}
2961
eaa728ee
FB
2962void helper_invlpg(target_ulong addr)
2963{
872929aa 2964 helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
914178d3 2965 tlb_flush_page(env, addr);
eaa728ee
FB
2966}
2967
2968void helper_rdtsc(void)
2969{
2970 uint64_t val;
2971
2972 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2973 raise_exception(EXCP0D_GPF);
2974 }
872929aa
FB
2975 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2976
33c263df 2977 val = cpu_get_tsc(env) + env->tsc_offset;
eaa728ee
FB
2978 EAX = (uint32_t)(val);
2979 EDX = (uint32_t)(val >> 32);
2980}
2981
1b050077
AP
2982void helper_rdtscp(void)
2983{
2984 helper_rdtsc();
2985 ECX = (uint32_t)(env->tsc_aux);
2986}
2987
eaa728ee
FB
2988void helper_rdpmc(void)
2989{
2990 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2991 raise_exception(EXCP0D_GPF);
2992 }
eaa728ee
FB
2993 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2994
2995 /* currently unimplemented */
2996 raise_exception_err(EXCP06_ILLOP, 0);
2997}
2998
2999#if defined(CONFIG_USER_ONLY)
/* user-mode emulation: MSR writes are ignored */
void helper_wrmsr(void)
{
}
3003
/* user-mode emulation: MSR reads are ignored (EDX:EAX left as-is) */
void helper_rdmsr(void)
{
}
3007#else
3008void helper_wrmsr(void)
3009{
3010 uint64_t val;
3011
872929aa
FB
3012 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3013
eaa728ee
FB
3014 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3015
3016 switch((uint32_t)ECX) {
3017 case MSR_IA32_SYSENTER_CS:
3018 env->sysenter_cs = val & 0xffff;
3019 break;
3020 case MSR_IA32_SYSENTER_ESP:
3021 env->sysenter_esp = val;
3022 break;
3023 case MSR_IA32_SYSENTER_EIP:
3024 env->sysenter_eip = val;
3025 break;
3026 case MSR_IA32_APICBASE:
4a942cea 3027 cpu_set_apic_base(env->apic_state, val);
eaa728ee
FB
3028 break;
3029 case MSR_EFER:
3030 {
3031 uint64_t update_mask;
3032 update_mask = 0;
3033 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3034 update_mask |= MSR_EFER_SCE;
3035 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3036 update_mask |= MSR_EFER_LME;
3037 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3038 update_mask |= MSR_EFER_FFXSR;
3039 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3040 update_mask |= MSR_EFER_NXE;
5efc27bb
FB
3041 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3042 update_mask |= MSR_EFER_SVME;
eef26553
AL
3043 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3044 update_mask |= MSR_EFER_FFXSR;
5efc27bb
FB
3045 cpu_load_efer(env, (env->efer & ~update_mask) |
3046 (val & update_mask));
eaa728ee
FB
3047 }
3048 break;
3049 case MSR_STAR:
3050 env->star = val;
3051 break;
3052 case MSR_PAT:
3053 env->pat = val;
3054 break;
3055 case MSR_VM_HSAVE_PA:
3056 env->vm_hsave = val;
3057 break;
3058#ifdef TARGET_X86_64
3059 case MSR_LSTAR:
3060 env->lstar = val;
3061 break;
3062 case MSR_CSTAR:
3063 env->cstar = val;
3064 break;
3065 case MSR_FMASK:
3066 env->fmask = val;
3067 break;
3068 case MSR_FSBASE:
3069 env->segs[R_FS].base = val;
3070 break;
3071 case MSR_GSBASE:
3072 env->segs[R_GS].base = val;
3073 break;
3074 case MSR_KERNELGSBASE:
3075 env->kernelgsbase = val;
3076 break;
3077#endif
165d9b82
AL
3078 case MSR_MTRRphysBase(0):
3079 case MSR_MTRRphysBase(1):
3080 case MSR_MTRRphysBase(2):
3081 case MSR_MTRRphysBase(3):
3082 case MSR_MTRRphysBase(4):
3083 case MSR_MTRRphysBase(5):
3084 case MSR_MTRRphysBase(6):
3085 case MSR_MTRRphysBase(7):
3086 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3087 break;
3088 case MSR_MTRRphysMask(0):
3089 case MSR_MTRRphysMask(1):
3090 case MSR_MTRRphysMask(2):
3091 case MSR_MTRRphysMask(3):
3092 case MSR_MTRRphysMask(4):
3093 case MSR_MTRRphysMask(5):
3094 case MSR_MTRRphysMask(6):
3095 case MSR_MTRRphysMask(7):
3096 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3097 break;
3098 case MSR_MTRRfix64K_00000:
3099 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3100 break;
3101 case MSR_MTRRfix16K_80000:
3102 case MSR_MTRRfix16K_A0000:
3103 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3104 break;
3105 case MSR_MTRRfix4K_C0000:
3106 case MSR_MTRRfix4K_C8000:
3107 case MSR_MTRRfix4K_D0000:
3108 case MSR_MTRRfix4K_D8000:
3109 case MSR_MTRRfix4K_E0000:
3110 case MSR_MTRRfix4K_E8000:
3111 case MSR_MTRRfix4K_F0000:
3112 case MSR_MTRRfix4K_F8000:
3113 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3114 break;
3115 case MSR_MTRRdefType:
3116 env->mtrr_deftype = val;
3117 break;
79c4f6b0
HY
3118 case MSR_MCG_STATUS:
3119 env->mcg_status = val;
3120 break;
3121 case MSR_MCG_CTL:
3122 if ((env->mcg_cap & MCG_CTL_P)
3123 && (val == 0 || val == ~(uint64_t)0))
3124 env->mcg_ctl = val;
3125 break;
1b050077
AP
3126 case MSR_TSC_AUX:
3127 env->tsc_aux = val;
3128 break;
eaa728ee 3129 default:
79c4f6b0
HY
3130 if ((uint32_t)ECX >= MSR_MC0_CTL
3131 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3132 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3133 if ((offset & 0x3) != 0
3134 || (val == 0 || val == ~(uint64_t)0))
3135 env->mce_banks[offset] = val;
3136 break;
3137 }
eaa728ee
FB
3138 /* XXX: exception ? */
3139 break;
3140 }
3141}
3142
3143void helper_rdmsr(void)
3144{
3145 uint64_t val;
872929aa
FB
3146
3147 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3148
eaa728ee
FB
3149 switch((uint32_t)ECX) {
3150 case MSR_IA32_SYSENTER_CS:
3151 val = env->sysenter_cs;
3152 break;
3153 case MSR_IA32_SYSENTER_ESP:
3154 val = env->sysenter_esp;
3155 break;
3156 case MSR_IA32_SYSENTER_EIP:
3157 val = env->sysenter_eip;
3158 break;
3159 case MSR_IA32_APICBASE:
4a942cea 3160 val = cpu_get_apic_base(env->apic_state);
eaa728ee
FB
3161 break;
3162 case MSR_EFER:
3163 val = env->efer;
3164 break;
3165 case MSR_STAR:
3166 val = env->star;
3167 break;
3168 case MSR_PAT:
3169 val = env->pat;
3170 break;
3171 case MSR_VM_HSAVE_PA:
3172 val = env->vm_hsave;
3173 break;
d5e49a81
AZ
3174 case MSR_IA32_PERF_STATUS:
3175 /* tsc_increment_by_tick */
3176 val = 1000ULL;
3177 /* CPU multiplier */
3178 val |= (((uint64_t)4ULL) << 40);
3179 break;
eaa728ee
FB
3180#ifdef TARGET_X86_64
3181 case MSR_LSTAR:
3182 val = env->lstar;
3183 break;
3184 case MSR_CSTAR:
3185 val = env->cstar;
3186 break;
3187 case MSR_FMASK:
3188 val = env->fmask;
3189 break;
3190 case MSR_FSBASE:
3191 val = env->segs[R_FS].base;
3192 break;
3193 case MSR_GSBASE:
3194 val = env->segs[R_GS].base;
3195 break;
3196 case MSR_KERNELGSBASE:
3197 val = env->kernelgsbase;
3198 break;
1b050077
AP
3199 case MSR_TSC_AUX:
3200 val = env->tsc_aux;
3201 break;
eaa728ee 3202#endif
165d9b82
AL
3203 case MSR_MTRRphysBase(0):
3204 case MSR_MTRRphysBase(1):
3205 case MSR_MTRRphysBase(2):
3206 case MSR_MTRRphysBase(3):
3207 case MSR_MTRRphysBase(4):
3208 case MSR_MTRRphysBase(5):
3209 case MSR_MTRRphysBase(6):
3210 case MSR_MTRRphysBase(7):
3211 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3212 break;
3213 case MSR_MTRRphysMask(0):
3214 case MSR_MTRRphysMask(1):
3215 case MSR_MTRRphysMask(2):
3216 case MSR_MTRRphysMask(3):
3217 case MSR_MTRRphysMask(4):
3218 case MSR_MTRRphysMask(5):
3219 case MSR_MTRRphysMask(6):
3220 case MSR_MTRRphysMask(7):
3221 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3222 break;
3223 case MSR_MTRRfix64K_00000:
3224 val = env->mtrr_fixed[0];
3225 break;
3226 case MSR_MTRRfix16K_80000:
3227 case MSR_MTRRfix16K_A0000:
3228 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3229 break;
3230 case MSR_MTRRfix4K_C0000:
3231 case MSR_MTRRfix4K_C8000:
3232 case MSR_MTRRfix4K_D0000:
3233 case MSR_MTRRfix4K_D8000:
3234 case MSR_MTRRfix4K_E0000:
3235 case MSR_MTRRfix4K_E8000:
3236 case MSR_MTRRfix4K_F0000:
3237 case MSR_MTRRfix4K_F8000:
3238 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3239 break;
3240 case MSR_MTRRdefType:
3241 val = env->mtrr_deftype;
3242 break;
dd5e3b17
AL
3243 case MSR_MTRRcap:
3244 if (env->cpuid_features & CPUID_MTRR)
3245 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3246 else
3247 /* XXX: exception ? */
3248 val = 0;
3249 break;
79c4f6b0
HY
3250 case MSR_MCG_CAP:
3251 val = env->mcg_cap;
3252 break;
3253 case MSR_MCG_CTL:
3254 if (env->mcg_cap & MCG_CTL_P)
3255 val = env->mcg_ctl;
3256 else
3257 val = 0;
3258 break;
3259 case MSR_MCG_STATUS:
3260 val = env->mcg_status;
3261 break;
eaa728ee 3262 default:
79c4f6b0
HY
3263 if ((uint32_t)ECX >= MSR_MC0_CTL
3264 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3265 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3266 val = env->mce_banks[offset];
3267 break;
3268 }
eaa728ee
FB
3269 /* XXX: exception ? */
3270 val = 0;
3271 break;
3272 }
3273 EAX = (uint32_t)(val);
3274 EDX = (uint32_t)(val >> 32);
3275}
3276#endif
3277
/* LSL: load the segment limit of the descriptor named by selector1.
 * Sets ZF on success and returns the (possibly page-granular expanded)
 * limit; clears ZF and returns 0 for null/invalid/unsuitable selectors.
 * Note the `fail:` label lives inside the non-system branch but is
 * reached by gotos from every check above it. */
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* Force lazy flags to a concrete value so we can edit ZF alone. */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;              /* null selector */
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;              /* outside descriptor table limit */
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* non-conforming: DPL must dominate both CPL and RPL */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system descriptors: only TSS/LDT types have a limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
3322
/* LAR: load access rights of the descriptor named by selector1.
 * Sets ZF and returns the masked access-rights bytes (0x00f0ff00 of
 * the descriptor's second word) on success; clears ZF and returns 0
 * otherwise.  Same cross-branch `fail:` pattern as helper_lsl. */
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* materialize current flags so only ZF is rewritten below */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;              /* null selector */
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* LAR accepts more system types than LSL (adds gates 4,5,12) */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
3368
/* VERR: set ZF if the segment named by selector1 is readable from the
 * current privilege level, clear ZF otherwise.  No value is returned;
 * only ZF in the lazy flags (CC_SRC) is updated. */
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;              /* null selector */
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;              /* system segments are never readable */
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segment: must be readable; privilege checked unless
           conforming */
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3401
/* VERW: set ZF if the segment named by selector1 is writable from the
 * current privilege level, clear ZF otherwise.  Code segments are
 * never writable, so they fail unconditionally. */
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;              /* null selector */
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))
        goto fail;              /* system segment */
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;              /* code segments can't be written */
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3431
/* x87 FPU helpers */

/* OR `mask` into the FPU status word; if any raised exception is
   unmasked in the control word, also set the summary (ES) and busy
   bits so a later fwait can deliver it. */
static void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}

/* Softfloat division that also records the x87 divide-by-zero
   exception in the status word when the divisor is zero. */
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (floatx_is_zero(b)) {
        fpu_set_exception(FPUS_ZE);
    }
    return floatx_div(a, b, &env->fp_status);
}

/* Deliver a pending FPU exception: #MF if CR0.NE is set, otherwise
   signal via the legacy FERR mechanism (system emulation only). */
static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3460
/* Load a 32-bit float image into the FT0 scratch operand. */
void helper_flds_FT0(uint32_t val)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = val;
    FT0 = float32_to_floatx(u.f, &env->fp_status);
}

/* Load a 64-bit double image into FT0. */
void helper_fldl_FT0(uint64_t val)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.i = val;
    FT0 = float64_to_floatx(u.f, &env->fp_status);
}

/* Load a 32-bit signed integer into FT0. */
void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}

/* Push a 32-bit float image onto the FP stack (becomes ST0). */
void helper_flds_ST0(uint32_t val)
{
    int new_fpstt;
    union {
        float32 f;
        uint32_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Push a 64-bit double image onto the FP stack. */
void helper_fldl_ST0(uint64_t val)
{
    int new_fpstt;
    union {
        float64 f;
        uint64_t i;
    } u;
    new_fpstt = (env->fpstt - 1) & 7;
    u.i = val;
    env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Push a 32-bit signed integer onto the FP stack. */
void helper_fildl_ST0(int32_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Push a 64-bit signed integer onto the FP stack. */
void helper_fildll_ST0(int64_t val)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Store ST0 as a 32-bit float image. */
uint32_t helper_fsts_ST0(void)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = floatx_to_float32(ST0, &env->fp_status);
    return u.i;
}

/* Store ST0 as a 64-bit double image. */
uint64_t helper_fstl_ST0(void)
{
    union {
        float64 f;
        uint64_t i;
    } u;
    u.f = floatx_to_float64(ST0, &env->fp_status);
    return u.i;
}

/* FIST (word): convert ST0, clamping out-of-range results to the
   16-bit indefinite value -32768. */
int32_t helper_fist_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

/* FIST (dword): convert ST0 using the current rounding mode. */
int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}

/* FIST (qword). */
int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}

/* FISTT (word): truncating convert, clamped to 16-bit indefinite. */
int32_t helper_fistt_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    if (val != (int16_t)val)
        val = -32768;
    return val;
}

/* FISTT (dword): truncating convert. */
int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}

/* FISTT (qword): truncating convert. */
int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}

/* Push an 80-bit extended value read from guest memory at ptr. */
void helper_fldt_ST0(target_ulong ptr)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt].d = helper_fldt(ptr);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

/* Store ST0 as an 80-bit extended value to guest memory at ptr. */
void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
3611
/* Grow the FP register stack by one (wrapper for translated code). */
void helper_fpush(void)
{
    fpush();
}

/* Pop the FP register stack. */
void helper_fpop(void)
{
    fpop();
}

/* FDECSTP: rotate the stack top down; clears C0-C3 (0x4700). */
void helper_fdecstp(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fpus &= (~0x4700);
}

/* FINCSTP: rotate the stack top up; clears C0-C3. */
void helper_fincstp(void)
{
    env->fpstt = (env->fpstt + 1) & 7;
    env->fpus &= (~0x4700);
}
3633
/* FPU move */

/* FFREE: mark the given stack-relative register empty in the tag word. */
void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}

/* Copy the FT0 scratch operand into ST0. */
void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}

/* Copy ST(n) into the FT0 scratch operand. */
void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}

/* Copy ST(n) into ST0. */
void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}

/* Copy ST0 into ST(n). */
void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}

/* FXCH: exchange ST0 and ST(n). */
void helper_fxchg_ST0_STN(int st_index)
{
    CPU86_LDouble tmp;
    tmp = ST(st_index);
    ST(st_index) = ST0;
    ST0 = tmp;
}
3668
/* FPU operations */

/* Status-word condition codes for compare results, indexed by
   floatx_compare() result + 1: less, equal, greater, unordered. */
static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};

/* FCOM: compare ST0 with FT0, result in C0/C2/C3 of the status word.
   Signaling comparison (raises invalid on any NaN). */
void helper_fcom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
}

/* FUCOM: quiet compare -- only signaling NaNs raise invalid. */
void helper_fucom_ST0_FT0(void)
{
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
}

/* EFLAGS condition codes for FCOMI-family compares, same indexing. */
static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};

/* FCOMI: compare ST0 with FT0, result in ZF/PF/CF of EFLAGS. */
void helper_fcomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}

/* FUCOMI: quiet variant of FCOMI. */
void helper_fucomi_ST0_FT0(void)
{
    int eflags;
    int ret;

    ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
    eflags = helper_cc_compute_all(CC_OP);
    eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
    CC_SRC = eflags;
}
3712
/* ST0 += FT0 (softfloat, status flags accumulate in fp_status). */
void helper_fadd_ST0_FT0(void)
{
    ST0 = floatx_add(ST0, FT0, &env->fp_status);
}

/* ST0 *= FT0. */
void helper_fmul_ST0_FT0(void)
{
    ST0 = floatx_mul(ST0, FT0, &env->fp_status);
}

/* ST0 -= FT0. */
void helper_fsub_ST0_FT0(void)
{
    ST0 = floatx_sub(ST0, FT0, &env->fp_status);
}

/* FSUBR: ST0 = FT0 - ST0. */
void helper_fsubr_ST0_FT0(void)
{
    ST0 = floatx_sub(FT0, ST0, &env->fp_status);
}

/* ST0 /= FT0 via helper_fdiv (records divide-by-zero). */
void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}

/* FDIVR: ST0 = FT0 / ST0. */
void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
3742
/* fp operations between STN and ST0 */

/* ST(n) += ST0. */
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) = floatx_add(ST(st_index), ST0, &env->fp_status);
}

/* ST(n) *= ST0. */
void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) = floatx_mul(ST(st_index), ST0, &env->fp_status);
}

/* ST(n) -= ST0. */
void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) = floatx_sub(ST(st_index), ST0, &env->fp_status);
}

/* FSUBR: ST(n) = ST0 - ST(n). */
void helper_fsubr_STN_ST0(int st_index)
{
    ST(st_index) = floatx_sub(ST0, ST(st_index), &env->fp_status);
}

/* ST(n) /= ST0 (records divide-by-zero via helper_fdiv). */
void helper_fdiv_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(*p, ST0);
}

/* FDIVR: ST(n) = ST0 / ST(n). */
void helper_fdivr_STN_ST0(int st_index)
{
    CPU86_LDouble *p;
    p = &ST(st_index);
    *p = helper_fdiv(ST0, *p);
}
3778
/* misc FPU operations */
/* FCHS: negate ST0. */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}

/* FABS: absolute value of ST0. */
void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}

/* The fldXX helpers load entries of the f15rk constant table
   (declared elsewhere); index meanings follow the instruction names. */
void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}

void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}

void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}

void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}

void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}

void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}

void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}

/* Load +0.0 into the FT0 scratch operand. */
void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}

/* FNSTSW: status word with the current TOP field merged in. */
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}

/* FNSTCW: return the control word. */
uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
3839
/* Propagate the x87 control word (env->fpuc) into the softfloat
   status: rounding mode (RC field) and, for 80-bit builds, the
   precision-control field (bits 8-9). */
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    /* PC field: 0=single(32), 2=double(64), 3=extended(80);
       1 is reserved and treated as extended here. */
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
3877
/* FLDCW: load the control word and resync softfloat settings. */
void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}

/* FNCLEX: clear exception flags, ES and B; keep C0-C3 and TOP. */
void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}

/* FWAIT: deliver a pending FPU exception if the summary bit is set. */
void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}

/* FNINIT: reset status/top, default control word 0x37f (all
   exceptions masked), and mark every register empty. */
void helper_fninit(void)
{
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
3909
3910/* BCD ops */
3911
3912void helper_fbld_ST0(target_ulong ptr)
3913{
3914 CPU86_LDouble tmp;
3915 uint64_t val;
3916 unsigned int v;
3917 int i;
3918
3919 val = 0;
3920 for(i = 8; i >= 0; i--) {
3921 v = ldub(ptr + i);
3922 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3923 }
788e7336
AJ
3924 tmp = int64_to_floatx(val, &env->fp_status);
3925 if (ldub(ptr + 9) & 0x80) {
3926 floatx_chs(tmp);
3927 }
eaa728ee
FB
3928 fpush();
3929 ST0 = tmp;
3930}
3931
/* FBSTP: store ST0 as an 18-digit packed-BCD value at ptr.
   Byte 9 holds the sign (0x80 = negative); remaining bytes hold two
   decimal digits each, least significant first; unused bytes are
   zero-filled. */
void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit digit pairs until the value is exhausted */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-pad the remaining BCD bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
3959
/* F2XM1: ST0 = 2^ST0 - 1, computed via host libm (not softfloat). */
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

/* FYL2X: ST1 = ST1 * log2(ST0), then pop.  Non-positive ST0 only
   sets C2 in the status word here (no result is produced). */
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

/* FPTAN: ST0 = tan(ST0), then push 1.0.  Arguments outside the
   reduction range set C2 and leave the stack unchanged. */
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

/* FPATAN: ST1 = atan2(ST1, ST0), then pop. */
void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}
4005
/* FXTRACT: split ST0 into exponent (replaces ST0) and significand
   (pushed on top).  A zero input yields -inf as the exponent, with a
   divide-by-zero exception, and the signed zero as significand. */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;

    temp.d = ST0;

    if (floatx_is_zero(ST0)) {
        /* Easy way to generate -inf and raising division by 0 exception */
        ST0 = floatx_div(floatx_chs(floatx_one), floatx_zero, &env->fp_status);
        fpush();
        ST0 = temp.d;
    } else {
        int expdif;

        /* unbiased exponent becomes the new ST0 */
        expdif = EXPD(temp) - EXPBIAS;
        /*DP exponent bias*/
        ST0 = int32_to_floatx(expdif, &env->fp_status);
        fpush();
        /* rescale the mantissa into [1,2) and push it */
        BIASEXPONENT(temp);
        ST0 = temp.d;
    }
}
4028
/* FPREM1: IEEE partial remainder ST0 = ST0 REM ST1 (round-to-nearest
   quotient).  Large exponent differences are reduced only partially,
   with C2 set to indicate the instruction must be re-executed; the
   low quotient bits are reported in C0/C3/C1. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        /* full reduction: quotient rounded to nearest */
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* partial reduction; caller must loop until C2 clears */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4082
/* FPREM: x87 partial remainder with truncated quotient (differs from
   FPREM1, which rounds the quotient to nearest).  Same C2 retry
   protocol and C0/C3/C1 quotient-bit reporting as helper_fprem1. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* partial reduction step size per the AMD docs */
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4137
/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), then pop; sets C2 when the
   argument is out of the function's domain. */
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

/* FSQRT: ST0 = sqrt(ST0); negative input sets C1 (invalid) but the
   host sqrt is still applied (yielding a NaN). */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

/* FSINCOS: ST0 = sin(ST0), push cos; out-of-range args set C2. */
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

/* FRNDINT: round ST0 to integer per the current rounding mode. */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}

/* FSCALE: ST0 = ST0 * 2^trunc(ST1); a NaN in ST1 propagates. */
void helper_fscale(void)
{
    if (floatx_is_any_nan(ST1)) {
        ST0 = ST1;
    } else {
        int n = floatx_to_int32_round_to_zero(ST1, &env->fp_status);
        ST0 = floatx_scalbn(ST0, n, &env->fp_status);
    }
}

/* FSIN: ST0 = sin(ST0); out-of-range args set C2. */
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

/* FCOS: ST0 = cos(ST0); out-of-range args set C2. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
4223
/* FXAM: classify ST0 into C0-C3 of the status word
   (infinity / NaN / zero / denormal / normal, plus sign in C1). */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN; the 80-bit format keeps
           an explicit integer bit, hence the different mantissa test */
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
4255
/* FNSTENV: write the FPU environment (control/status/tag words plus
   zeroed instruction/operand pointers) to guest memory, in 32-bit or
   16-bit layout.  The 2-bit-per-register tag word is recomputed from
   the register contents rather than from env->fptags alone. */
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
	fptag <<= 2;
	if (env->fptags[i]) {
            fptag |= 3;      /* empty */
	} else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
	        fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
4305
/* FLDENV: load control/status/tag words from guest memory (32-bit or
   16-bit layout); the tag word is collapsed to a per-register
   empty/non-empty flag in env->fptags. */
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
	env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
	env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        /* tag value 3 means empty */
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
4327
/* FNSAVE: store the environment followed by all eight 80-bit
   registers, then reinitialize the FPU (same effect as FNINIT). */
void helper_fsave(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    /* register image follows the 14- or 28-byte environment */
    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

/* FRSTOR: load the environment and all eight 80-bit registers. */
void helper_frstor(target_ulong ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
4370
/* FXSAVE: store the FPU/MMX/SSE state in the 512-byte FXSAVE image.
   The tag word is stored in its compacted 1-bit-per-register form
   (inverted: 1 = valid).  XMM registers are skipped under the
   "fast FXSAVE" (FFXSR) conditions. */
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    /* eight 80-bit registers, each in a 16-byte slot */
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}
4431
/* FXRSTOR: reload the FPU/MMX/SSE state from a 512-byte FXSAVE image;
   mirror of helper_fxsave, including the fast-FXRSTOR XMM skip. */
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    /* undo the inverted compact tag encoding (1 = valid in memory) */
    fptag ^= 0xff;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}
4481
#ifndef USE_X86LDOUBLE

/* Convert the 64-bit internal FP representation to an 80-bit
   mantissa/exponent pair (used when CPU86_LDouble is a double). */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

/* Convert an 80-bit mantissa/exponent pair to the 64-bit internal
   representation.  Truncates the mantissa; overflow unhandled. */
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}

#else

/* With native 80-bit long double the conversion is a plain copy of
   the mantissa and sign/exponent halves. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
#endif
4537
4538#ifdef TARGET_X86_64
4539
4540//#define DEBUG_MULDIV
4541
/* 128-bit add: (*phigh:*plow) += (b:a), with carry propagation from
   the low into the high word. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t sum = *plow + a;

    /* unsigned wrap-around means a carry was produced */
    *phigh += b + (sum < a ? 1 : 0);
    *plow = sum;
}
4550
/* 128-bit two's-complement negate of (*phigh:*plow). */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    uint64_t lo = ~*plow + 1;

    /* carry into the high word only when the low word was zero */
    *phigh = ~*phigh + (lo == 0 ? 1 : 0);
    *plow = lo;
}
4557
/* 128-by-64 unsigned division: divides (*phigh:*plow) by b, leaving
   the quotient in *plow and the remainder in *phigh.
   Returns TRUE when the quotient does not fit in 64 bits. */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t lo = *plow;
    uint64_t hi = *phigh;

    if (hi == 0) {
        /* dividend fits in 64 bits: use native division */
        *plow = lo / b;
        *phigh = lo % b;
        return 0;
    }

    if (hi >= b) {
        /* quotient would need more than 64 bits */
        return 1;
    }

    /* XXX: use a better algorithm -- bit-at-a-time restoring division:
       hi accumulates the partial remainder, lo collects quotient bits */
    {
        int k;
        for (k = 0; k < 64; k++) {
            int topbit = hi >> 63;
            int qbit;

            hi = (hi << 1) | (lo >> 63);
            if (topbit || hi >= b) {
                hi -= b;
                qbit = 1;
            } else {
                qbit = 0;
            }
            lo = (lo << 1) | qbit;
        }
    }
#if defined(DEBUG_MULDIV)
    printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
           *phigh, *plow, b, lo, hi);
#endif
    *plow = lo;
    *phigh = hi;
    return 0;
}
4595
4596/* return TRUE if overflow */
4597static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4598{
4599 int sa, sb;
4600 sa = ((int64_t)*phigh < 0);
4601 if (sa)
4602 neg128(plow, phigh);
4603 sb = (b < 0);
4604 if (sb)
4605 b = -b;
4606 if (div64(plow, phigh, b) != 0)
4607 return 1;
4608 if (sa ^ sb) {
4609 if (*plow > (1ULL << 63))
4610 return 1;
4611 *plow = - *plow;
4612 } else {
4613 if (*plow >= (1ULL << 63))
4614 return 1;
4615 }
4616 if (sa)
4617 *phigh = - *phigh;
4618 return 0;
4619}
4620
4621void helper_mulq_EAX_T0(target_ulong t0)
4622{
4623 uint64_t r0, r1;
4624
4625 mulu64(&r0, &r1, EAX, t0);
4626 EAX = r0;
4627 EDX = r1;
4628 CC_DST = r0;
4629 CC_SRC = r1;
4630}
4631
4632void helper_imulq_EAX_T0(target_ulong t0)
4633{
4634 uint64_t r0, r1;
4635
4636 muls64(&r0, &r1, EAX, t0);
4637 EAX = r0;
4638 EDX = r1;
4639 CC_DST = r0;
4640 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4641}
4642
4643target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4644{
4645 uint64_t r0, r1;
4646
4647 muls64(&r0, &r1, t0, t1);
4648 CC_DST = r0;
4649 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4650 return r0;
4651}
4652
4653void helper_divq_EAX(target_ulong t0)
4654{
4655 uint64_t r0, r1;
4656 if (t0 == 0) {
4657 raise_exception(EXCP00_DIVZ);
4658 }
4659 r0 = EAX;
4660 r1 = EDX;
4661 if (div64(&r0, &r1, t0))
4662 raise_exception(EXCP00_DIVZ);
4663 EAX = r0;
4664 EDX = r1;
4665}
4666
4667void helper_idivq_EAX(target_ulong t0)
4668{
4669 uint64_t r0, r1;
4670 if (t0 == 0) {
4671 raise_exception(EXCP00_DIVZ);
4672 }
4673 r0 = EAX;
4674 r1 = EDX;
4675 if (idiv64(&r0, &r1, t0))
4676 raise_exception(EXCP00_DIVZ);
4677 EAX = r0;
4678 EDX = r1;
4679}
4680#endif
4681
94451178 4682static void do_hlt(void)
eaa728ee
FB
4683{
4684 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
ce5232c5 4685 env->halted = 1;
eaa728ee
FB
4686 env->exception_index = EXCP_HLT;
4687 cpu_loop_exit();
4688}
4689
94451178
FB
/* HLT: check the SVM intercept, advance EIP past the instruction so the
   guest resumes after it on wake-up, then halt. */
void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}
4697
eaa728ee
FB
4698void helper_monitor(target_ulong ptr)
4699{
4700 if ((uint32_t)ECX != 0)
4701 raise_exception(EXCP0D_GPF);
4702 /* XXX: store address ? */
872929aa 4703 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
eaa728ee
FB
4704}
4705
94451178 4706void helper_mwait(int next_eip_addend)
eaa728ee
FB
4707{
4708 if ((uint32_t)ECX != 0)
4709 raise_exception(EXCP0D_GPF);
872929aa 4710 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
94451178
FB
4711 EIP += next_eip_addend;
4712
eaa728ee
FB
4713 /* XXX: not complete but not completely erroneous */
4714 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4715 /* more than one CPU: do not sleep because another CPU may
4716 wake this one */
4717 } else {
94451178 4718 do_hlt();
eaa728ee
FB
4719 }
4720}
4721
/* Raise the internal debug exception and leave the cpu loop. */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}
4727
a2397807
JK
/* Clear the resume flag (RF) in EFLAGS. */
void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}
4732
eaa728ee
FB
/* Raise a software interrupt (is_int = 1, no error code). */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
4737
/* Raise a CPU exception from generated code. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4742
/* CLI: clear the interrupt-enable flag. */
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}
4747
/* STI: set the interrupt-enable flag. */
void helper_sti(void)
{
    env->eflags |= IF_MASK;
}
4752
4753#if 0
4754/* vm86plus instructions */
4755void helper_cli_vm(void)
4756{
4757 env->eflags &= ~VIF_MASK;
4758}
4759
4760void helper_sti_vm(void)
4761{
4762 env->eflags |= VIF_MASK;
4763 if (env->eflags & VIP_MASK) {
4764 raise_exception(EXCP0D_GPF);
4765 }
4766}
4767#endif
4768
/* Inhibit interrupt delivery until after the next instruction
   (interrupt shadow, e.g. following sti). */
void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}
4773
/* End the one-instruction interrupt shadow. */
void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
4778
4779void helper_boundw(target_ulong a0, int v)
4780{
4781 int low, high;
4782 low = ldsw(a0);
4783 high = ldsw(a0 + 2);
4784 v = (int16_t)v;
4785 if (v < low || v > high) {
4786 raise_exception(EXCP05_BOUND);
4787 }
eaa728ee
FB
4788}
4789
4790void helper_boundl(target_ulong a0, int v)
4791{
4792 int low, high;
4793 low = ldl(a0);
4794 high = ldl(a0 + 4);
4795 if (v < low || v > high) {
4796 raise_exception(EXCP05_BOUND);
4797 }
eaa728ee
FB
4798}
4799
4800static float approx_rsqrt(float a)
4801{
4802 return 1.0 / sqrt(a);
4803}
4804
/* Reciprocal for rcpss/rcpps; computed exactly in double precision rather
   than via the hardware's lookup-table approximation. */
static float approx_rcp(float a)
{
    return (float)(1.0 / a);
}
4809
4810#if !defined(CONFIG_USER_ONLY)
4811
4812#define MMUSUFFIX _mmu
4813
4814#define SHIFT 0
4815#include "softmmu_template.h"
4816
4817#define SHIFT 1
4818#include "softmmu_template.h"
4819
4820#define SHIFT 2
4821#include "softmmu_template.h"
4822
4823#define SHIFT 3
4824#include "softmmu_template.h"
4825
4826#endif
4827
d9957a8b 4828#if !defined(CONFIG_USER_ONLY)
eaa728ee
FB
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault: roll the CPU state back to the
                   faulting instruction before raising the exception */
                cpu_restore_state(tb, env, pc);
            }
        }
        /* re-raise whatever fault cpu_x86_handle_mmu_fault recorded */
        raise_exception_err(env->exception_index, env->error_code);
    }
    /* restore the env pointer for the (possible) C-code caller */
    env = saved_env;
}
d9957a8b 4861#endif
eaa728ee
FB
4862
4863/* Secure Virtual Machine helpers */
4864
eaa728ee
FB
4865#if defined(CONFIG_USER_ONLY)
4866
/* User-mode emulation: there is no system state to virtualize, so every
   SVM helper collapses to a no-op stub. */
void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
4902#else
4903
c227f099 4904static inline void svm_save_seg(target_phys_addr_t addr,
872929aa 4905 const SegmentCache *sc)
eaa728ee 4906{
872929aa
FB
4907 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4908 sc->selector);
4909 stq_phys(addr + offsetof(struct vmcb_seg, base),
4910 sc->base);
4911 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4912 sc->limit);
4913 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
e72210e1 4914 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
872929aa
FB
4915}
4916
c227f099 4917static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
872929aa
FB
4918{
4919 unsigned int flags;
4920
4921 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4922 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4923 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4924 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4925 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
eaa728ee
FB
4926}
4927
c227f099 4928static inline void svm_load_seg_cache(target_phys_addr_t addr,
872929aa 4929 CPUState *env, int seg_reg)
eaa728ee 4930{
872929aa
FB
4931 SegmentCache sc1, *sc = &sc1;
4932 svm_load_seg(addr, sc);
4933 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4934 sc->base, sc->limit, sc->flags);
eaa728ee
FB
4935}
4936
/* VMRUN: save the host state into the hsave area, load the guest state
   from the vmcb addressed by rAX, enable the intercept bitmaps and
   optionally inject the event pending in event_inj. */
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    /* rAX holds the vmcb physical address; truncate outside 64-bit mode */
    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    /* the saved host rip points past the vmrun instruction */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    /* load the guest state from the vmcb */
    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* virtual interrupt masking: remember the guest's IF in HF2_HIF */
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    /* the global interrupt flag is set while a guest runs */
    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit();
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit();
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
5109
5110void helper_vmmcall(void)
5111{
872929aa
FB
5112 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5113 raise_exception(EXCP06_ILLOP);
eaa728ee
FB
5114}
5115
914178d3 5116void helper_vmload(int aflag)
eaa728ee
FB
5117{
5118 target_ulong addr;
872929aa
FB
5119 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5120
914178d3
FB
5121 if (aflag == 2)
5122 addr = EAX;
5123 else
5124 addr = (uint32_t)EAX;
5125
93fcfe39 5126 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
eaa728ee
FB
5127 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5128 env->segs[R_FS].base);
5129
872929aa
FB
5130 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5131 env, R_FS);
5132 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5133 env, R_GS);
5134 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5135 &env->tr);
5136 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5137 &env->ldt);
eaa728ee
FB
5138
5139#ifdef TARGET_X86_64
5140 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5141 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5142 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5143 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5144#endif
5145 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5146 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5147 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5148 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5149}
5150
914178d3 5151void helper_vmsave(int aflag)
eaa728ee
FB
5152{
5153 target_ulong addr;
872929aa 5154 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
914178d3
FB
5155
5156 if (aflag == 2)
5157 addr = EAX;
5158 else
5159 addr = (uint32_t)EAX;
5160
93fcfe39 5161 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
eaa728ee
FB
5162 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5163 env->segs[R_FS].base);
5164
872929aa
FB
5165 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5166 &env->segs[R_FS]);
5167 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5168 &env->segs[R_GS]);
5169 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5170 &env->tr);
5171 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5172 &env->ldt);
eaa728ee
FB
5173
5174#ifdef TARGET_X86_64
5175 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5176 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5177 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5178 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5179#endif
5180 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5181 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5182 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5183 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5184}
5185
872929aa
FB
5186void helper_stgi(void)
5187{
5188 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
db620f46 5189 env->hflags2 |= HF2_GIF_MASK;
872929aa
FB
5190}
5191
5192void helper_clgi(void)
5193{
5194 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
db620f46 5195 env->hflags2 &= ~HF2_GIF_MASK;
872929aa
FB
5196}
5197
eaa728ee
FB
5198void helper_skinit(void)
5199{
872929aa
FB
5200 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5201 /* XXX: not implemented */
872929aa 5202 raise_exception(EXCP06_ILLOP);
eaa728ee
FB
5203}
5204
914178d3 5205void helper_invlpga(int aflag)
eaa728ee 5206{
914178d3 5207 target_ulong addr;
872929aa 5208 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
914178d3
FB
5209
5210 if (aflag == 2)
5211 addr = EAX;
5212 else
5213 addr = (uint32_t)EAX;
5214
5215 /* XXX: could use the ASID to see if it is needed to do the
5216 flush */
5217 tlb_flush_page(env, addr);
eaa728ee
FB
5218}
5219
/* Raise a #VMEXIT of the given type if the intercept bitmaps loaded at
   vmrun request one.  param is forwarded as exit_info_1.  For
   SVM_EXIT_MSR, param selects the read (0) / write (1) permission bit
   — NOTE(review): inferred from the "(1 << param)" shift; confirm. */
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    /* not running under SVM: nothing can be intercepted */
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            /* the MSR permission map packs 2 bits per MSR; compute the
               byte index (t1) and bit offset (t0) for ECX's range */
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                /* MSR outside the mapped ranges is always intercepted */
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
5287
5288void helper_svm_check_io(uint32_t port, uint32_t param,
5289 uint32_t next_eip_addend)
5290{
872929aa 5291 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
eaa728ee
FB
5292 /* FIXME: this should be read in at vmrun (faster this way?) */
5293 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5294 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5295 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5296 /* next EIP */
5297 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5298 env->eip + next_eip_addend);
5299 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5300 }
5301 }
5302}
5303
/* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: write the guest state back into the current vmcb, record the
   exit code/info, reload the host state from the hsave area and resume
   the outer cpu loop. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* expose a pending interrupt shadow to the hypervisor */
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* write the guest's virtual interrupt state back into int_ctl */
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    /* report a half-delivered injected event back to the hypervisor */
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
5441
5442#endif
5443
5444/* MMX/SSE */
5445/* XXX: optimize by storing fptt and fptags in the static cpu state */
/* Switch the FPU into MMX mode: top-of-stack index 0 and all eight
   tag bytes cleared (two 32-bit stores cover the 8-byte tag array). */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
5452
/* EMMS: mark every FP tag byte 1 (empty), leaving MMX mode. */
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
5459
/* XXX: suppress */
/* Copy one 64-bit MMX/SSE lane from *s to *d. */
void helper_movq(void *d, void *s)
{
    uint64_t v = *(uint64_t *)s;

    *(uint64_t *)d = v;
}
5465
5466#define SHIFT 0
5467#include "ops_sse.h"
5468
5469#define SHIFT 1
5470#include "ops_sse.h"
5471
5472#define SHIFT 0
5473#include "helper_template.h"
5474#undef SHIFT
5475
5476#define SHIFT 1
5477#include "helper_template.h"
5478#undef SHIFT
5479
5480#define SHIFT 2
5481#include "helper_template.h"
5482#undef SHIFT
5483
5484#ifdef TARGET_X86_64
5485
5486#define SHIFT 3
5487#include "helper_template.h"
5488#undef SHIFT
5489
5490#endif
5491
5492/* bit operations */
5493target_ulong helper_bsf(target_ulong t0)
5494{
5495 int count;
5496 target_ulong res;
5497
5498 res = t0;
5499 count = 0;
5500 while ((res & 1) == 0) {
5501 count++;
5502 res >>= 1;
5503 }
5504 return count;
5505}
5506
31501a71 5507target_ulong helper_lzcnt(target_ulong t0, int wordsize)
eaa728ee
FB
5508{
5509 int count;
5510 target_ulong res, mask;
31501a71
AP
5511
5512 if (wordsize > 0 && t0 == 0) {
5513 return wordsize;
5514 }
eaa728ee
FB
5515 res = t0;
5516 count = TARGET_LONG_BITS - 1;
5517 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5518 while ((res & mask) == 0) {
5519 count--;
5520 res <<= 1;
5521 }
31501a71
AP
5522 if (wordsize > 0) {
5523 return wordsize - 1 - count;
5524 }
eaa728ee
FB
5525 return count;
5526}
5527
31501a71
AP
/* BSR: index of the highest set bit of t0 (lzcnt with wordsize 0). */
target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
eaa728ee
FB
5532
/* CC_OP_EFLAGS: the flags are already materialized in CC_SRC. */
static int compute_all_eflags(void)
{
    return CC_SRC;
}
5537
/* CC_OP_EFLAGS: extract just the carry flag from the materialized flags. */
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
5542
/* Materialize all condition-code flags from the lazy-flags state.
   op selects which operation last set the flags (CC_OP_*); each case
   dispatches to the width-specific compute_all_* helper generated for
   that operation.  The default case is unreachable for well-formed
   translated code and yields 0. */
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

    /* 64-bit operand variants exist only on x86-64 targets. */
#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
5613
/* Materialize only the carry flag from the lazy-flags state; cheaper
   than helper_cc_compute_all() when CF alone is needed.  Note several
   operations share one helper across operand widths: all MUL widths
   use compute_c_mull(), all INC/DEC widths use compute_c_incl(), and
   all SAR widths use compute_c_sarl(), since the carry computation for
   those operations does not depend on operand size.  The default case
   is unreachable for well-formed translated code and yields 0. */
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_c_eflags();

    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

    /* 64-bit operand variants exist only on x86-64 targets. */
#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}