]> git.proxmox.com Git - qemu.git/blame - target-i386/op_helper.c
target-i386: Make x86 mfence and lfence illegal without SSE2
[qemu.git] / target-i386 / op_helper.c
CommitLineData
eaa728ee
FB
1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
eaa728ee 18 */
83dae095 19
a2c9ed3c 20#include <math.h>
eaa728ee 21#include "exec.h"
d9957a8b 22#include "exec-all.h"
eaa728ee 23#include "host-utils.h"
35bed8ee 24#include "ioport.h"
eaa728ee
FB
25
//#define DEBUG_PCALL

/* Logging helpers for protected-mode call/interrupt debugging.
   Compile to no-ops unless DEBUG_PCALL is defined above. */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
        log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
37
38
eaa728ee
FB
#if 0
/* Debug wrapper: log the source line of every raise_exception_err
   call site.  Disabled; flip "#if 0" to "#if 1" to enable. */
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
46
/* Lookup table for the x86 parity flag: entry i is CC_P when byte
   value i has an even number of set bits, 0 otherwise. */
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
81
/* RCL/RCR rotate-count table for 16-bit operands: the effective
   rotate amount is the count modulo 17 (16 data bits plus CF). */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
};
89
/* RCL/RCR rotate-count table for 8-bit operands: the effective
   rotate amount is the count modulo 9 (8 data bits plus CF). */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4,
};
97
/* 80-bit extended-precision FPU constants (sign 0, biased exponent,
   64-bit significand): lg2 = log10(2), l2e = log2(e), l2t = log2(10). */
#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
eaa728ee
FB
102/* broken thread support */
103
c227f099 104static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
eaa728ee
FB
105
106void helper_lock(void)
107{
108 spin_lock(&global_cpu_lock);
109}
110
111void helper_unlock(void)
112{
113 spin_unlock(&global_cpu_lock);
114}
115
116void helper_write_eflags(target_ulong t0, uint32_t update_mask)
117{
118 load_eflags(t0, update_mask);
119}
120
121target_ulong helper_read_eflags(void)
122{
123 uint32_t eflags;
a7812ae4 124 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
125 eflags |= (DF & DF_MASK);
126 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
127 return eflags;
128}
129
130/* return non zero if error */
131static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
132 int selector)
133{
134 SegmentCache *dt;
135 int index;
136 target_ulong ptr;
137
138 if (selector & 0x4)
139 dt = &env->ldt;
140 else
141 dt = &env->gdt;
142 index = selector & ~7;
143 if ((index + 7) > dt->limit)
144 return -1;
145 ptr = dt->base + index;
146 *e1_ptr = ldl_kernel(ptr);
147 *e2_ptr = ldl_kernel(ptr + 4);
148 return 0;
149}
150
151static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
152{
153 unsigned int limit;
154 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
155 if (e2 & DESC_G_MASK)
156 limit = (limit << 12) | 0xfff;
157 return limit;
158}
159
/* Reassemble the 32-bit segment base scattered across the two
   descriptor words: bits 0-15 in e1[31:16], bits 16-23 in e2[7:0],
   bits 24-31 in e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;

    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
164
/* Fill a segment cache directly from the raw descriptor words,
   without any permission or presence checks. */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
171
172/* init the segment cache in vm86 mode. */
173static inline void load_seg_vm(int seg, int selector)
174{
175 selector &= 0xffff;
176 cpu_x86_load_seg_cache(env, seg, selector,
177 (selector << 4), 0xffff, 0);
178}
179
/* Read the privilege-level DPL inner stack (SS:ESP pair) out of the
   current TSS, handling both 16-bit and 32-bit TSS layouts.  Raises
   #TS if the entry lies beyond the TSS limit; aborts on a TR that is
   not a valid present TSS. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    /* Disabled dump of the raw TSS contents. */
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;  /* 0 = 16-bit TSS, 1 = 32-bit TSS */
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
214
/* XXX: merge with load_seg() */
/* Load segment register SEG_REG with SELECTOR as part of a task
   switch, applying the TSS-specific consistency checks (errors raise
   #TS rather than #GP).  A null selector is accepted for the data
   segments but not for CS or SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* null selector: only legal outside CS/SS */
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
264
/* Reason codes for switch_tss(): the source of the task switch
   determines busy-bit and NT-flag handling. */
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by TSS_SELECTOR.
   E1/E2 are the descriptor words of the target TSS (or task gate,
   which is dereferenced first), SOURCE is one of SWITCH_TSS_* above
   and NEXT_EIP is saved as the outgoing task's EIP.  Raises #TS,
   #GP or #NP on the various architectural consistency checks. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* minimum TSS size: 104 bytes for a 32-bit TSS, 44 for 16-bit */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* record the back link and mark the nested-task flag */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
517
/* check if Port I/O is allowed in TSS: consult the I/O permission
   bitmap of the current (32-bit) TSS for SIZE bits starting at port
   ADDR; raise #GP(0) if any covered bit is set or the bitmap is not
   reachable within the TSS limit. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
542
/* Byte-wide I/O permission check (see check_io). */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

/* Word-wide I/O permission check. */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

/* Dword-wide I/O permission check. */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
557
/* Port I/O helpers: forward to the board-level I/O dispatcher,
   masking output data to the access width. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
587
588static inline unsigned int get_sp_mask(unsigned int e2)
589{
590 if (e2 & DESC_B_MASK)
591 return 0xffffffff;
592 else
593 return 0xffff;
594}
595
/* Return 1 for the exception vectors that push an error code:
   #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14), #AC(17).
   (The historical typo in the name is kept; callers use it.) */
static int exeption_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    default:
        return 0;
    }
}
610
eaa728ee
FB
#ifdef TARGET_X86_64
/* Update ESP/RSP honouring the stack-size attribute encoded in
   SP_MASK: only the low 16 or 32 bits are replaced for 16/32-bit
   stacks, the full 64 bits otherwise. */
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
eaa728ee
FB
/* XXX: add a is_user flag to have proper security support */
/* Push/pop a 16- or 32-bit value on the stack described by segment
   base SSP, offset SP and stack-size mask SP_MASK.  SP is a caller
   variable updated in place; the 32-bit forms go through SEG_ADDL to
   keep 32-bit wrap-around semantics on 64-bit builds. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
653
/* protected mode interrupt */
/* Deliver interrupt/exception INTNO through the protected-mode IDT.
   IS_INT is set for software INTn (privilege-checked against the gate
   DPL), IS_HW for external interrupts; NEXT_EIP is the return address
   saved for software interrupts.  Handles task gates, 286 and 386
   interrupt/trap gates, stack switches to inner privilege levels and
   the vm86 transition to CPL 0. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code on the new task's stack */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        /* 32-bit gate: push with 32-bit operands */
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        /* 16-bit gate */
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* vm86 exit: clear the data segments */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
847
#ifdef TARGET_X86_64

/* Push/pop a 64-bit value on the flat 64-bit stack; SP is a caller
   variable updated in place. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

/* Fetch a stack pointer from the 64-bit TSS: LEVEL 0-2 selects
   RSP0-RSP2, LEVEL 4-10 the IST entries (callers pass ist + 3). */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
878
/* 64 bit interrupt */
/* Deliver interrupt/exception INTNO through the 64-bit IDT (16-byte
   gate entries).  Only 386-style interrupt/trap gates are legal in
   long mode; the stack is switched via RSPn or an IST entry and the
   frame is always pushed with 64-bit operands. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    /* target CS must be a 64-bit code segment (L set, D clear) */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        /* null SS with the new privilege level in the RPL field */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
996
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
/* SYSCALL in user-mode emulation: raise EXCP_SYSCALL so the host
   dispatcher handles the call; no CPU state is changed here. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
#else
/* SYSCALL (system emulation): fast transition to CPL 0 using the
   flat CS/SS pair taken from MSR_STAR.  In long mode the return RIP
   goes to RCX and RFLAGS to R11 (then masked by MSR_FMASK); in
   legacy mode only IF/RF/VM are cleared.  #UD if EFER.SCE is off. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        /* entry point depends on whether the caller was 64-bit code */
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
eaa728ee 1059
#ifdef TARGET_X86_64
/* SYSRET: return from SYSCALL to CPL 3.  The new CS selector comes
   from MSR_STAR bits 63:48; dflag == 2 selects a 64-bit return. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    /* #UD if SYSCALL/SYSRET is not enabled in EFER */
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    /* #GP unless executed in protected mode at CPL 0 */
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* return to 64-bit code: selector + 16, RPL forced to 3 */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* return to 32-bit compatibility-mode code */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* RFLAGS were saved in R11 by SYSCALL */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        /* legacy-mode return: return EIP was saved in ECX */
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif
eaa728ee
FB
1115
1116/* real mode interrupt */
1117static void do_interrupt_real(int intno, int is_int, int error_code,
1118 unsigned int next_eip)
1119{
1120 SegmentCache *dt;
1121 target_ulong ptr, ssp;
1122 int selector;
1123 uint32_t offset, esp;
1124 uint32_t old_cs, old_eip;
eaa728ee 1125
eaa728ee
FB
1126 /* real mode (simpler !) */
1127 dt = &env->idt;
1128 if (intno * 4 + 3 > dt->limit)
1129 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1130 ptr = dt->base + intno * 4;
1131 offset = lduw_kernel(ptr);
1132 selector = lduw_kernel(ptr + 2);
1133 esp = ESP;
1134 ssp = env->segs[R_SS].base;
1135 if (is_int)
1136 old_eip = next_eip;
1137 else
1138 old_eip = env->eip;
1139 old_cs = env->segs[R_CS].selector;
1140 /* XXX: use SS segment size ? */
1141 PUSHW(ssp, esp, 0xffff, compute_eflags());
1142 PUSHW(ssp, esp, 0xffff, old_cs);
1143 PUSHW(ssp, esp, 0xffff, old_eip);
1144
1145 /* update processor state */
1146 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1147 env->eip = offset;
1148 env->segs[R_CS].selector = selector;
1149 env->segs[R_CS].base = (selector << 4);
1150 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1151}
1152
1153/* fake user mode interrupt */
1154void do_interrupt_user(int intno, int is_int, int error_code,
1155 target_ulong next_eip)
1156{
1157 SegmentCache *dt;
1158 target_ulong ptr;
1159 int dpl, cpl, shift;
1160 uint32_t e2;
1161
1162 dt = &env->idt;
1163 if (env->hflags & HF_LMA_MASK) {
1164 shift = 4;
1165 } else {
1166 shift = 3;
1167 }
1168 ptr = dt->base + (intno << shift);
1169 e2 = ldl_kernel(ptr + 4);
1170
1171 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1172 cpl = env->hflags & HF_CPL_MASK;
1235fc06 1173 /* check privilege if software int */
eaa728ee
FB
1174 if (is_int && dpl < cpl)
1175 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1176
1177 /* Since we emulate only user space, we cannot do more than
1178 exiting the emulation with the suitable exception and error
1179 code */
1180 if (is_int)
1181 EIP = next_eip;
1182}
1183
00ea18d1 1184#if !defined(CONFIG_USER_ONLY)
2ed51f5b
AL
1185static void handle_even_inj(int intno, int is_int, int error_code,
1186 int is_hw, int rm)
1187{
1188 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1189 if (!(event_inj & SVM_EVTINJ_VALID)) {
1190 int type;
1191 if (is_int)
1192 type = SVM_EVTINJ_TYPE_SOFT;
1193 else
1194 type = SVM_EVTINJ_TYPE_EXEPT;
1195 event_inj = intno | type | SVM_EVTINJ_VALID;
1196 if (!rm && exeption_has_error_code(intno)) {
1197 event_inj |= SVM_EVTINJ_VALID_ERR;
1198 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1199 }
1200 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1201 }
1202}
00ea18d1 1203#endif
2ed51f5b 1204
eaa728ee
FB
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    /* optional interrupt tracing (CPU_LOG_INT), protected mode only */
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                /* page fault: the faulting address is in CR2 */
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        /* record the event in the VMCB if running under SVM (rm = 0) */
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        /* same, but flag real-mode delivery (rm = 1, no error code) */
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    /* delivery finished: clear the pending-injection marker in the VMCB */
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
1272
f55761a0
AL
1273/* This should come from sysemu.h - if we could include it here... */
1274void qemu_system_reset_request(void);
1275
eaa728ee
FB
1276/*
1277 * Check nested exceptions and change to double or triple fault if
1278 * needed. It should only be called, if this is not an interrupt.
1279 * Returns the new exception number.
1280 */
1281static int check_exception(int intno, int *error_code)
1282{
1283 int first_contributory = env->old_exception == 0 ||
1284 (env->old_exception >= 10 &&
1285 env->old_exception <= 13);
1286 int second_contributory = intno == 0 ||
1287 (intno >= 10 && intno <= 13);
1288
93fcfe39 1289 qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
eaa728ee
FB
1290 env->old_exception, intno);
1291
f55761a0
AL
1292#if !defined(CONFIG_USER_ONLY)
1293 if (env->old_exception == EXCP08_DBLE) {
1294 if (env->hflags & HF_SVMI_MASK)
1295 helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
1296
680c3069 1297 qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
f55761a0
AL
1298
1299 qemu_system_reset_request();
1300 return EXCP_HLT;
1301 }
1302#endif
eaa728ee
FB
1303
1304 if ((first_contributory && second_contributory)
1305 || (env->old_exception == EXCP0E_PAGE &&
1306 (second_contributory || (intno == EXCP0E_PAGE)))) {
1307 intno = EXCP08_DBLE;
1308 *error_code = 0;
1309 }
1310
1311 if (second_contributory || (intno == EXCP0E_PAGE) ||
1312 (intno == EXCP08_DBLE))
1313 env->old_exception = intno;
1314
1315 return intno;
1316}
1317
1318/*
1319 * Signal an interruption. It is executed in the main CPU loop.
1320 * is_int is TRUE if coming from the int instruction. next_eip is the
1321 * EIP value AFTER the interrupt instruction. It is only relevant if
1322 * is_int is TRUE.
1323 */
a5e50b26 1324static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
1325 int next_eip_addend)
eaa728ee
FB
1326{
1327 if (!is_int) {
1328 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1329 intno = check_exception(intno, &error_code);
872929aa
FB
1330 } else {
1331 helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
eaa728ee
FB
1332 }
1333
1334 env->exception_index = intno;
1335 env->error_code = error_code;
1336 env->exception_is_int = is_int;
1337 env->exception_next_eip = env->eip + next_eip_addend;
1338 cpu_loop_exit();
1339}
1340
eaa728ee
FB
1341/* shortcuts to generate exceptions */
1342
/* Raise exception EXCEPTION_INDEX with an error code; does not return. */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1347
/* Raise exception EXCEPTION_INDEX with error code 0; does not return. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1352
63a54736
JW
/* Switch the global env pointer to NENV, then raise the exception;
   does not return. */
void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
eaa728ee
FB
/* SMM support */

#if defined(CONFIG_USER_ONLY)

/* SMM cannot be entered in user-mode emulation: provide no-op stubs. */
void do_smm_enter(void)
{
}

void helper_rsm(void)
{
}

#else

/* SMM state-save area revision id; bit 17 (0x20000) advertises SMBASE
   relocation support and is tested again in helper_rsm on resume. */
#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif
1377
/* Enter System Management Mode: save the CPU state into the SMRAM
   state-save area at SMBASE + 0x8000 and switch to the SMM execution
   environment (paging and protection off, CS based at SMBASE,
   entry point 0x8000).  Save-area layout differs between the 64-bit
   and 32-bit builds. */
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* segment registers: selector, flags, limit, base */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    /* descriptor tables and task register */
    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* general-purpose registers, RIP, RFLAGS, debug registers */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    /* control registers */
    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* 32-bit save-state layout */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    /* segment registers; ES/CS/SS and DS/FS/GS live in two groups */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    /* CS is based at SMBASE with a flat 4 GB limit */
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    /* disable protection, emulation trap, task switch and paging */
    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1510
/* RSM: resume from System Management Mode by reloading the complete
   CPU state from the SMRAM state-save area at SMBASE + 0x8000.
   SMBASE itself is relocatable only if bit 17 of the saved revision
   id is set. */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    /* segment registers */
    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    /* descriptor tables and task register */
    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    /* general-purpose registers, RIP, RFLAGS, debug registers */
    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    /* control registers */
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    /* 32-bit save-state layout */
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    /* segment registers; offsets mirror do_smm_enter */
    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
1628
1629#endif /* !CONFIG_USER_ONLY */
1630
1631
1632/* division, flags are undefined */
1633
1634void helper_divb_AL(target_ulong t0)
1635{
1636 unsigned int num, den, q, r;
1637
1638 num = (EAX & 0xffff);
1639 den = (t0 & 0xff);
1640 if (den == 0) {
1641 raise_exception(EXCP00_DIVZ);
1642 }
1643 q = (num / den);
1644 if (q > 0xff)
1645 raise_exception(EXCP00_DIVZ);
1646 q &= 0xff;
1647 r = (num % den) & 0xff;
1648 EAX = (EAX & ~0xffff) | (r << 8) | q;
1649}
1650
1651void helper_idivb_AL(target_ulong t0)
1652{
1653 int num, den, q, r;
1654
1655 num = (int16_t)EAX;
1656 den = (int8_t)t0;
1657 if (den == 0) {
1658 raise_exception(EXCP00_DIVZ);
1659 }
1660 q = (num / den);
1661 if (q != (int8_t)q)
1662 raise_exception(EXCP00_DIVZ);
1663 q &= 0xff;
1664 r = (num % den) & 0xff;
1665 EAX = (EAX & ~0xffff) | (r << 8) | q;
1666}
1667
1668void helper_divw_AX(target_ulong t0)
1669{
1670 unsigned int num, den, q, r;
1671
1672 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1673 den = (t0 & 0xffff);
1674 if (den == 0) {
1675 raise_exception(EXCP00_DIVZ);
1676 }
1677 q = (num / den);
1678 if (q > 0xffff)
1679 raise_exception(EXCP00_DIVZ);
1680 q &= 0xffff;
1681 r = (num % den) & 0xffff;
1682 EAX = (EAX & ~0xffff) | q;
1683 EDX = (EDX & ~0xffff) | r;
1684}
1685
1686void helper_idivw_AX(target_ulong t0)
1687{
1688 int num, den, q, r;
1689
1690 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1691 den = (int16_t)t0;
1692 if (den == 0) {
1693 raise_exception(EXCP00_DIVZ);
1694 }
1695 q = (num / den);
1696 if (q != (int16_t)q)
1697 raise_exception(EXCP00_DIVZ);
1698 q &= 0xffff;
1699 r = (num % den) & 0xffff;
1700 EAX = (EAX & ~0xffff) | q;
1701 EDX = (EDX & ~0xffff) | r;
1702}
1703
1704void helper_divl_EAX(target_ulong t0)
1705{
1706 unsigned int den, r;
1707 uint64_t num, q;
1708
1709 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1710 den = t0;
1711 if (den == 0) {
1712 raise_exception(EXCP00_DIVZ);
1713 }
1714 q = (num / den);
1715 r = (num % den);
1716 if (q > 0xffffffff)
1717 raise_exception(EXCP00_DIVZ);
1718 EAX = (uint32_t)q;
1719 EDX = (uint32_t)r;
1720}
1721
1722void helper_idivl_EAX(target_ulong t0)
1723{
1724 int den, r;
1725 int64_t num, q;
1726
1727 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1728 den = t0;
1729 if (den == 0) {
1730 raise_exception(EXCP00_DIVZ);
1731 }
1732 q = (num / den);
1733 r = (num % den);
1734 if (q != (int32_t)q)
1735 raise_exception(EXCP00_DIVZ);
1736 EAX = (uint32_t)q;
1737 EDX = (uint32_t)r;
1738}
1739
1740/* bcd */
1741
1742/* XXX: exception */
1743void helper_aam(int base)
1744{
1745 int al, ah;
1746 al = EAX & 0xff;
1747 ah = al / base;
1748 al = al % base;
1749 EAX = (EAX & ~0xffff) | al | (ah << 8);
1750 CC_DST = al;
1751}
1752
1753void helper_aad(int base)
1754{
1755 int al, ah;
1756 al = EAX & 0xff;
1757 ah = (EAX >> 8) & 0xff;
1758 al = ((ah * base) + al) & 0xff;
1759 EAX = (EAX & ~0xffff) | al;
1760 CC_DST = al;
1761}
1762
1763void helper_aaa(void)
1764{
1765 int icarry;
1766 int al, ah, af;
1767 int eflags;
1768
a7812ae4 1769 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1770 af = eflags & CC_A;
1771 al = EAX & 0xff;
1772 ah = (EAX >> 8) & 0xff;
1773
1774 icarry = (al > 0xf9);
1775 if (((al & 0x0f) > 9 ) || af) {
1776 al = (al + 6) & 0x0f;
1777 ah = (ah + 1 + icarry) & 0xff;
1778 eflags |= CC_C | CC_A;
1779 } else {
1780 eflags &= ~(CC_C | CC_A);
1781 al &= 0x0f;
1782 }
1783 EAX = (EAX & ~0xffff) | al | (ah << 8);
1784 CC_SRC = eflags;
eaa728ee
FB
1785}
1786
1787void helper_aas(void)
1788{
1789 int icarry;
1790 int al, ah, af;
1791 int eflags;
1792
a7812ae4 1793 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1794 af = eflags & CC_A;
1795 al = EAX & 0xff;
1796 ah = (EAX >> 8) & 0xff;
1797
1798 icarry = (al < 6);
1799 if (((al & 0x0f) > 9 ) || af) {
1800 al = (al - 6) & 0x0f;
1801 ah = (ah - 1 - icarry) & 0xff;
1802 eflags |= CC_C | CC_A;
1803 } else {
1804 eflags &= ~(CC_C | CC_A);
1805 al &= 0x0f;
1806 }
1807 EAX = (EAX & ~0xffff) | al | (ah << 8);
1808 CC_SRC = eflags;
eaa728ee
FB
1809}
1810
1811void helper_daa(void)
1812{
1813 int al, af, cf;
1814 int eflags;
1815
a7812ae4 1816 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1817 cf = eflags & CC_C;
1818 af = eflags & CC_A;
1819 al = EAX & 0xff;
1820
1821 eflags = 0;
1822 if (((al & 0x0f) > 9 ) || af) {
1823 al = (al + 6) & 0xff;
1824 eflags |= CC_A;
1825 }
1826 if ((al > 0x9f) || cf) {
1827 al = (al + 0x60) & 0xff;
1828 eflags |= CC_C;
1829 }
1830 EAX = (EAX & ~0xff) | al;
1831 /* well, speed is not an issue here, so we compute the flags by hand */
1832 eflags |= (al == 0) << 6; /* zf */
1833 eflags |= parity_table[al]; /* pf */
1834 eflags |= (al & 0x80); /* sf */
1835 CC_SRC = eflags;
eaa728ee
FB
1836}
1837
1838void helper_das(void)
1839{
1840 int al, al1, af, cf;
1841 int eflags;
1842
a7812ae4 1843 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1844 cf = eflags & CC_C;
1845 af = eflags & CC_A;
1846 al = EAX & 0xff;
1847
1848 eflags = 0;
1849 al1 = al;
1850 if (((al & 0x0f) > 9 ) || af) {
1851 eflags |= CC_A;
1852 if (al < 6 || cf)
1853 eflags |= CC_C;
1854 al = (al - 6) & 0xff;
1855 }
1856 if ((al1 > 0x99) || cf) {
1857 al = (al - 0x60) & 0xff;
1858 eflags |= CC_C;
1859 }
1860 EAX = (EAX & ~0xff) | al;
1861 /* well, speed is not an issue here, so we compute the flags by hand */
1862 eflags |= (al == 0) << 6; /* zf */
1863 eflags |= parity_table[al]; /* pf */
1864 eflags |= (al & 0x80); /* sf */
1865 CC_SRC = eflags;
eaa728ee
FB
1866}
1867
1868void helper_into(int next_eip_addend)
1869{
1870 int eflags;
a7812ae4 1871 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1872 if (eflags & CC_O) {
1873 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1874 }
1875}
1876
1877void helper_cmpxchg8b(target_ulong a0)
1878{
1879 uint64_t d;
1880 int eflags;
1881
a7812ae4 1882 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1883 d = ldq(a0);
1884 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1885 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1886 eflags |= CC_Z;
1887 } else {
278ed7c3
FB
1888 /* always do the store */
1889 stq(a0, d);
eaa728ee
FB
1890 EDX = (uint32_t)(d >> 32);
1891 EAX = (uint32_t)d;
1892 eflags &= ~CC_Z;
1893 }
1894 CC_SRC = eflags;
1895}
1896
#ifdef TARGET_X86_64
/* CMPXCHG16B: 16-byte compare-and-exchange against RDX:RAX / RCX:RBX.
   The operand must be 16-byte aligned (#GP otherwise); the store is
   performed whether or not the comparison succeeds. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t lo, hi;
    int flags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    flags = helper_cc_compute_all(CC_OP);
    lo = ldq(a0);
    hi = ldq(a0 + 8);
    if (lo == EAX && hi == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        flags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, lo);
        stq(a0 + 8, hi);
        EDX = hi;
        EAX = lo;
        flags &= ~CC_Z;
    }
    CC_SRC = flags;
}
#endif
1923
/* Deliver the #DB trap after a single-stepped instruction. */
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS; /* report single-step in DR6 */
#endif
    raise_exception(EXCP01_DB);
}
1932
1933void helper_cpuid(void)
1934{
6fd805e1 1935 uint32_t eax, ebx, ecx, edx;
eaa728ee 1936
872929aa 1937 helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
e737b32a 1938
e00b6f80 1939 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
6fd805e1
AL
1940 EAX = eax;
1941 EBX = ebx;
1942 ECX = ecx;
1943 EDX = edx;
eaa728ee
FB
1944}
1945
1946void helper_enter_level(int level, int data32, target_ulong t1)
1947{
1948 target_ulong ssp;
1949 uint32_t esp_mask, esp, ebp;
1950
1951 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1952 ssp = env->segs[R_SS].base;
1953 ebp = EBP;
1954 esp = ESP;
1955 if (data32) {
1956 /* 32 bit */
1957 esp -= 4;
1958 while (--level) {
1959 esp -= 4;
1960 ebp -= 4;
1961 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1962 }
1963 esp -= 4;
1964 stl(ssp + (esp & esp_mask), t1);
1965 } else {
1966 /* 16 bit */
1967 esp -= 2;
1968 while (--level) {
1969 esp -= 2;
1970 ebp -= 2;
1971 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1972 }
1973 esp -= 2;
1974 stw(ssp + (esp & esp_mask), t1);
1975 }
1976}
1977
1978#ifdef TARGET_X86_64
1979void helper_enter64_level(int level, int data64, target_ulong t1)
1980{
1981 target_ulong esp, ebp;
1982 ebp = EBP;
1983 esp = ESP;
1984
1985 if (data64) {
1986 /* 64 bit */
1987 esp -= 8;
1988 while (--level) {
1989 esp -= 8;
1990 ebp -= 8;
1991 stq(esp, ldq(ebp));
1992 }
1993 esp -= 8;
1994 stq(esp, t1);
1995 } else {
1996 /* 16 bit */
1997 esp -= 2;
1998 while (--level) {
1999 esp -= 2;
2000 ebp -= 2;
2001 stw(esp, lduw(ebp));
2002 }
2003 esp -= 2;
2004 stw(esp, t1);
2005 }
2006}
2007#endif
2008
/* LLDT: load the Local Descriptor Table register from SELECTOR. */
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* the selector must reference the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* system descriptors are 16 bytes in long mode */
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            /* third descriptor word holds base bits 63:32 */
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
2055
/* LTR: load the Task Register from SELECTOR and mark the TSS busy. */
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* TR must come from the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* system descriptors are 16 bytes in long mode */
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an available TSS: type 1 (16-bit) or 9 (32/64-bit) */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            /* e3 holds base bits 63:32; e4's type field must be zero */
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in its descriptor */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
2110
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case: allowed except for SS (and, in 64-bit
           mode, allowed for SS too unless at CPL 3) */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* TI bit selects LDT vs GDT */
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* system descriptors cannot be loaded into data/stack segments */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* not-present: #SS for the stack segment, #NP otherwise */
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
2189
2190/* protected mode jump */
2191void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2192 int next_eip_addend)
2193{
2194 int gate_cs, type;
2195 uint32_t e1, e2, cpl, dpl, rpl, limit;
2196 target_ulong next_eip;
2197
2198 if ((new_cs & 0xfffc) == 0)
2199 raise_exception_err(EXCP0D_GPF, 0);
2200 if (load_segment(&e1, &e2, new_cs) != 0)
2201 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2202 cpl = env->hflags & HF_CPL_MASK;
2203 if (e2 & DESC_S_MASK) {
2204 if (!(e2 & DESC_CS_MASK))
2205 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2206 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2207 if (e2 & DESC_C_MASK) {
2208 /* conforming code segment */
2209 if (dpl > cpl)
2210 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2211 } else {
2212 /* non conforming code segment */
2213 rpl = new_cs & 3;
2214 if (rpl > cpl)
2215 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2216 if (dpl != cpl)
2217 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2218 }
2219 if (!(e2 & DESC_P_MASK))
2220 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2221 limit = get_seg_limit(e1, e2);
2222 if (new_eip > limit &&
2223 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2224 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2225 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2226 get_seg_base(e1, e2), limit, e2);
2227 EIP = new_eip;
2228 } else {
2229 /* jump to call or task gate */
2230 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2231 rpl = new_cs & 3;
2232 cpl = env->hflags & HF_CPL_MASK;
2233 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2234 switch(type) {
2235 case 1: /* 286 TSS */
2236 case 9: /* 386 TSS */
2237 case 5: /* task gate */
2238 if (dpl < cpl || dpl < rpl)
2239 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2240 next_eip = env->eip + next_eip_addend;
2241 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2242 CC_OP = CC_OP_EFLAGS;
2243 break;
2244 case 4: /* 286 call gate */
2245 case 12: /* 386 call gate */
2246 if ((dpl < cpl) || (dpl < rpl))
2247 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2248 if (!(e2 & DESC_P_MASK))
2249 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2250 gate_cs = e1 >> 16;
2251 new_eip = (e1 & 0xffff);
2252 if (type == 12)
2253 new_eip |= (e2 & 0xffff0000);
2254 if (load_segment(&e1, &e2, gate_cs) != 0)
2255 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2256 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2257 /* must be code segment */
2258 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2259 (DESC_S_MASK | DESC_CS_MASK)))
2260 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2261 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2262 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2263 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2264 if (!(e2 & DESC_P_MASK))
2265 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2266 limit = get_seg_limit(e1, e2);
2267 if (new_eip > limit)
2268 raise_exception_err(EXCP0D_GPF, 0);
2269 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2270 get_seg_base(e1, e2), limit, e2);
2271 EIP = new_eip;
2272 break;
2273 default:
2274 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2275 break;
2276 }
2277 }
2278}
2279
2280/* real mode call */
2281void helper_lcall_real(int new_cs, target_ulong new_eip1,
2282 int shift, int next_eip)
2283{
2284 int new_eip;
2285 uint32_t esp, esp_mask;
2286 target_ulong ssp;
2287
2288 new_eip = new_eip1;
2289 esp = ESP;
2290 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2291 ssp = env->segs[R_SS].base;
2292 if (shift) {
2293 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2294 PUSHL(ssp, esp, esp_mask, next_eip);
2295 } else {
2296 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2297 PUSHW(ssp, esp, esp_mask, next_eip);
2298 }
2299
2300 SET_ESP(esp, esp_mask);
2301 env->eip = new_eip;
2302 env->segs[R_CS].selector = new_cs;
2303 env->segs[R_CS].base = (new_cs << 4);
2304}
2305
/* protected mode call: direct call to a code segment, or an indirect
   call through a call gate / task gate / TSS descriptor.  A call gate
   may switch to an inner-privilege stack taken from the TSS, copying
   param_count parameters from the old stack. */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            /* 16/32 bit: push return address on the current stack */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            /* switch_tss loads raw EFLAGS; flags are no longer lazy */
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* gate type bit 3 distinguishes 386 (32-bit) from 286 gates */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        /* target CS:offset and parameter count from the gate */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege: new SS:ESP comes from the TSS */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            /* push old SS:ESP, then copy param_count parameters from
               the old stack onto the new one */
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        /* push the return address on the (possibly new) stack */
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        /* the RPL of the loaded CS becomes the new privilege level */
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
2506
/* real and vm86 mode iret: pop IP, CS and FLAGS without any
   descriptor checks */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    /* in vm86 mode IOPL is excluded from the restored flags */
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* any IRET ends the NMI-blocked state */
    env->hflags2 &= ~HF2_NMI_MASK;
}
2542
2543static inline void validate_seg(int seg_reg, int cpl)
2544{
2545 int dpl;
2546 uint32_t e2;
2547
2548 /* XXX: on x86_64, we do not want to nullify FS and GS because
2549 they may still contain a valid base. I would be interested to
2550 know how a real x86_64 CPU behaves */
2551 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2552 (env->segs[seg_reg].selector & 0xfffc) == 0)
2553 return;
2554
2555 e2 = env->segs[seg_reg].flags;
2556 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2557 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2558 /* data or non conforming code segment */
2559 if (dpl < cpl) {
2560 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2561 }
2562 }
2563}
2564
/* protected mode iret / far return.  shift: 0=16-bit, 1=32-bit,
   2=64-bit operands; is_iret selects whether EFLAGS is also popped;
   addend extra bytes are discarded from the stack (lret imm16). */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    /* pop EIP, CS and (for iret) EFLAGS */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            /* iret with VM set in the popped flags returns to vm86 */
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    /* validate the returned-to code segment */
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* returns can never go to a more privileged (lower) level */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level: also pop SS:ESP */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            /* validate the returned-to stack segment */
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* iret back to vm86: pop ESP, SS and all data segments */
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
2759
/* protected mode IRET: either a nested-task return (EFLAGS.NT set)
   via the back-link in the current TSS, or a normal privileged
   return through helper_ret_protected. */
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* nested-task IRET is not available in long mode */
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        /* the back-link selector is the first word of the current TSS */
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    /* any IRET ends the NMI-blocked state */
    env->hflags2 &= ~HF2_NMI_MASK;
}
2786
/* protected mode far return (LRET): shares the IRET path but without
   popping EFLAGS (is_iret = 0); addend discards the imm16 extra
   stack bytes of "lret imm16" */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
2791
/* SYSENTER: fast transition to CPL 0 using flat segments derived
   from the MSR_IA32_SYSENTER_{CS,ESP,EIP} values; #GP(0) if
   SYSENTER_CS was never configured */
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: the target code segment gets the L bit */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    /* SS is the selector following SYSENTER_CS in the GDT */
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
2824
/* SYSEXIT: fast return to CPL 3.  dflag == 2 selects the 64-bit
   return path.  Only legal from CPL 0 with SYSENTER_CS configured. */
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit: CS/SS selectors are SYSENTER_CS + 32 / + 40 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        /* legacy: CS/SS selectors are SYSENTER_CS + 16 / + 24 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    /* new stack pointer comes from ECX, new EIP from EDX */
    ESP = ECX;
    EIP = EDX;
}
2863
872929aa
FB
#if defined(CONFIG_USER_ONLY)
/* user-mode emulation: control and debug register accesses are
   privileged operations, so these helpers are no-op stubs */
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
872929aa
FB
2877#else
2878target_ulong helper_read_crN(int reg)
2879{
2880 target_ulong val;
2881
2882 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2883 switch(reg) {
2884 default:
2885 val = env->cr[reg];
2886 break;
2887 case 8:
db620f46 2888 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 2889 val = cpu_get_apic_tpr(env->apic_state);
db620f46
FB
2890 } else {
2891 val = env->v_tpr;
2892 }
872929aa
FB
2893 break;
2894 }
2895 return val;
2896}
2897
/* Write a control register, with SVM intercept check.  CR0/CR3/CR4
   go through the cpu_x86_update_* helpers so dependent CPU state
   (hflags, TLB) is refreshed; CR8 updates the TPR. */
void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        /* only forward to the APIC when SVM virtual interrupt
           masking is not active */
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
01df040b
AL
2922
2923void helper_movl_drN_T0(int reg, target_ulong t0)
2924{
2925 int i;
2926
2927 if (reg < 4) {
2928 hw_breakpoint_remove(env, reg);
2929 env->dr[reg] = t0;
2930 hw_breakpoint_insert(env, reg);
2931 } else if (reg == 7) {
2932 for (i = 0; i < 4; i++)
2933 hw_breakpoint_remove(env, i);
2934 env->dr[7] = t0;
2935 for (i = 0; i < 4; i++)
2936 hw_breakpoint_insert(env, i);
2937 } else
2938 env->dr[reg] = t0;
2939}
872929aa 2940#endif
eaa728ee
FB
2941
2942void helper_lmsw(target_ulong t0)
2943{
2944 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2945 if already set to one. */
2946 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
872929aa 2947 helper_write_crN(0, t0);
eaa728ee
FB
2948}
2949
2950void helper_clts(void)
2951{
2952 env->cr[0] &= ~CR0_TS_MASK;
2953 env->hflags &= ~HF_TS_MASK;
2954}
2955
eaa728ee
FB
/* INVLPG: possible SVM intercept first, then drop the TLB entry for
   the given linear address */
void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
2961
2962void helper_rdtsc(void)
2963{
2964 uint64_t val;
2965
2966 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2967 raise_exception(EXCP0D_GPF);
2968 }
872929aa
FB
2969 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
2970
33c263df 2971 val = cpu_get_tsc(env) + env->tsc_offset;
eaa728ee
FB
2972 EAX = (uint32_t)(val);
2973 EDX = (uint32_t)(val >> 32);
2974}
2975
1b050077
AP
/* RDTSCP: RDTSC (EDX:EAX) plus the TSC_AUX MSR value in ECX */
void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}
2981
eaa728ee
FB
2982void helper_rdpmc(void)
2983{
2984 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2985 raise_exception(EXCP0D_GPF);
2986 }
eaa728ee
FB
2987 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
2988
2989 /* currently unimplemented */
2990 raise_exception_err(EXCP06_ILLOP, 0);
2991}
2992
#if defined(CONFIG_USER_ONLY)
/* user-mode emulation: RDMSR/WRMSR are privileged, so these are
   no-op stubs */
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
3001#else
3002void helper_wrmsr(void)
3003{
3004 uint64_t val;
3005
872929aa
FB
3006 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3007
eaa728ee
FB
3008 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3009
3010 switch((uint32_t)ECX) {
3011 case MSR_IA32_SYSENTER_CS:
3012 env->sysenter_cs = val & 0xffff;
3013 break;
3014 case MSR_IA32_SYSENTER_ESP:
3015 env->sysenter_esp = val;
3016 break;
3017 case MSR_IA32_SYSENTER_EIP:
3018 env->sysenter_eip = val;
3019 break;
3020 case MSR_IA32_APICBASE:
4a942cea 3021 cpu_set_apic_base(env->apic_state, val);
eaa728ee
FB
3022 break;
3023 case MSR_EFER:
3024 {
3025 uint64_t update_mask;
3026 update_mask = 0;
3027 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3028 update_mask |= MSR_EFER_SCE;
3029 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3030 update_mask |= MSR_EFER_LME;
3031 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3032 update_mask |= MSR_EFER_FFXSR;
3033 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3034 update_mask |= MSR_EFER_NXE;
5efc27bb
FB
3035 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3036 update_mask |= MSR_EFER_SVME;
eef26553
AL
3037 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3038 update_mask |= MSR_EFER_FFXSR;
5efc27bb
FB
3039 cpu_load_efer(env, (env->efer & ~update_mask) |
3040 (val & update_mask));
eaa728ee
FB
3041 }
3042 break;
3043 case MSR_STAR:
3044 env->star = val;
3045 break;
3046 case MSR_PAT:
3047 env->pat = val;
3048 break;
3049 case MSR_VM_HSAVE_PA:
3050 env->vm_hsave = val;
3051 break;
3052#ifdef TARGET_X86_64
3053 case MSR_LSTAR:
3054 env->lstar = val;
3055 break;
3056 case MSR_CSTAR:
3057 env->cstar = val;
3058 break;
3059 case MSR_FMASK:
3060 env->fmask = val;
3061 break;
3062 case MSR_FSBASE:
3063 env->segs[R_FS].base = val;
3064 break;
3065 case MSR_GSBASE:
3066 env->segs[R_GS].base = val;
3067 break;
3068 case MSR_KERNELGSBASE:
3069 env->kernelgsbase = val;
3070 break;
3071#endif
165d9b82
AL
3072 case MSR_MTRRphysBase(0):
3073 case MSR_MTRRphysBase(1):
3074 case MSR_MTRRphysBase(2):
3075 case MSR_MTRRphysBase(3):
3076 case MSR_MTRRphysBase(4):
3077 case MSR_MTRRphysBase(5):
3078 case MSR_MTRRphysBase(6):
3079 case MSR_MTRRphysBase(7):
3080 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3081 break;
3082 case MSR_MTRRphysMask(0):
3083 case MSR_MTRRphysMask(1):
3084 case MSR_MTRRphysMask(2):
3085 case MSR_MTRRphysMask(3):
3086 case MSR_MTRRphysMask(4):
3087 case MSR_MTRRphysMask(5):
3088 case MSR_MTRRphysMask(6):
3089 case MSR_MTRRphysMask(7):
3090 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3091 break;
3092 case MSR_MTRRfix64K_00000:
3093 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3094 break;
3095 case MSR_MTRRfix16K_80000:
3096 case MSR_MTRRfix16K_A0000:
3097 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3098 break;
3099 case MSR_MTRRfix4K_C0000:
3100 case MSR_MTRRfix4K_C8000:
3101 case MSR_MTRRfix4K_D0000:
3102 case MSR_MTRRfix4K_D8000:
3103 case MSR_MTRRfix4K_E0000:
3104 case MSR_MTRRfix4K_E8000:
3105 case MSR_MTRRfix4K_F0000:
3106 case MSR_MTRRfix4K_F8000:
3107 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3108 break;
3109 case MSR_MTRRdefType:
3110 env->mtrr_deftype = val;
3111 break;
79c4f6b0
HY
3112 case MSR_MCG_STATUS:
3113 env->mcg_status = val;
3114 break;
3115 case MSR_MCG_CTL:
3116 if ((env->mcg_cap & MCG_CTL_P)
3117 && (val == 0 || val == ~(uint64_t)0))
3118 env->mcg_ctl = val;
3119 break;
1b050077
AP
3120 case MSR_TSC_AUX:
3121 env->tsc_aux = val;
3122 break;
eaa728ee 3123 default:
79c4f6b0
HY
3124 if ((uint32_t)ECX >= MSR_MC0_CTL
3125 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3126 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3127 if ((offset & 0x3) != 0
3128 || (val == 0 || val == ~(uint64_t)0))
3129 env->mce_banks[offset] = val;
3130 break;
3131 }
eaa728ee
FB
3132 /* XXX: exception ? */
3133 break;
3134 }
3135}
3136
3137void helper_rdmsr(void)
3138{
3139 uint64_t val;
872929aa
FB
3140
3141 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3142
eaa728ee
FB
3143 switch((uint32_t)ECX) {
3144 case MSR_IA32_SYSENTER_CS:
3145 val = env->sysenter_cs;
3146 break;
3147 case MSR_IA32_SYSENTER_ESP:
3148 val = env->sysenter_esp;
3149 break;
3150 case MSR_IA32_SYSENTER_EIP:
3151 val = env->sysenter_eip;
3152 break;
3153 case MSR_IA32_APICBASE:
4a942cea 3154 val = cpu_get_apic_base(env->apic_state);
eaa728ee
FB
3155 break;
3156 case MSR_EFER:
3157 val = env->efer;
3158 break;
3159 case MSR_STAR:
3160 val = env->star;
3161 break;
3162 case MSR_PAT:
3163 val = env->pat;
3164 break;
3165 case MSR_VM_HSAVE_PA:
3166 val = env->vm_hsave;
3167 break;
d5e49a81
AZ
3168 case MSR_IA32_PERF_STATUS:
3169 /* tsc_increment_by_tick */
3170 val = 1000ULL;
3171 /* CPU multiplier */
3172 val |= (((uint64_t)4ULL) << 40);
3173 break;
eaa728ee
FB
3174#ifdef TARGET_X86_64
3175 case MSR_LSTAR:
3176 val = env->lstar;
3177 break;
3178 case MSR_CSTAR:
3179 val = env->cstar;
3180 break;
3181 case MSR_FMASK:
3182 val = env->fmask;
3183 break;
3184 case MSR_FSBASE:
3185 val = env->segs[R_FS].base;
3186 break;
3187 case MSR_GSBASE:
3188 val = env->segs[R_GS].base;
3189 break;
3190 case MSR_KERNELGSBASE:
3191 val = env->kernelgsbase;
3192 break;
1b050077
AP
3193 case MSR_TSC_AUX:
3194 val = env->tsc_aux;
3195 break;
eaa728ee 3196#endif
165d9b82
AL
3197 case MSR_MTRRphysBase(0):
3198 case MSR_MTRRphysBase(1):
3199 case MSR_MTRRphysBase(2):
3200 case MSR_MTRRphysBase(3):
3201 case MSR_MTRRphysBase(4):
3202 case MSR_MTRRphysBase(5):
3203 case MSR_MTRRphysBase(6):
3204 case MSR_MTRRphysBase(7):
3205 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3206 break;
3207 case MSR_MTRRphysMask(0):
3208 case MSR_MTRRphysMask(1):
3209 case MSR_MTRRphysMask(2):
3210 case MSR_MTRRphysMask(3):
3211 case MSR_MTRRphysMask(4):
3212 case MSR_MTRRphysMask(5):
3213 case MSR_MTRRphysMask(6):
3214 case MSR_MTRRphysMask(7):
3215 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3216 break;
3217 case MSR_MTRRfix64K_00000:
3218 val = env->mtrr_fixed[0];
3219 break;
3220 case MSR_MTRRfix16K_80000:
3221 case MSR_MTRRfix16K_A0000:
3222 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3223 break;
3224 case MSR_MTRRfix4K_C0000:
3225 case MSR_MTRRfix4K_C8000:
3226 case MSR_MTRRfix4K_D0000:
3227 case MSR_MTRRfix4K_D8000:
3228 case MSR_MTRRfix4K_E0000:
3229 case MSR_MTRRfix4K_E8000:
3230 case MSR_MTRRfix4K_F0000:
3231 case MSR_MTRRfix4K_F8000:
3232 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3233 break;
3234 case MSR_MTRRdefType:
3235 val = env->mtrr_deftype;
3236 break;
dd5e3b17
AL
3237 case MSR_MTRRcap:
3238 if (env->cpuid_features & CPUID_MTRR)
3239 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3240 else
3241 /* XXX: exception ? */
3242 val = 0;
3243 break;
79c4f6b0
HY
3244 case MSR_MCG_CAP:
3245 val = env->mcg_cap;
3246 break;
3247 case MSR_MCG_CTL:
3248 if (env->mcg_cap & MCG_CTL_P)
3249 val = env->mcg_ctl;
3250 else
3251 val = 0;
3252 break;
3253 case MSR_MCG_STATUS:
3254 val = env->mcg_status;
3255 break;
eaa728ee 3256 default:
79c4f6b0
HY
3257 if ((uint32_t)ECX >= MSR_MC0_CTL
3258 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3259 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3260 val = env->mce_banks[offset];
3261 break;
3262 }
eaa728ee
FB
3263 /* XXX: exception ? */
3264 val = 0;
3265 break;
3266 }
3267 EAX = (uint32_t)(val);
3268 EDX = (uint32_t)(val >> 32);
3269}
3270#endif
3271
/* LSL: load the segment limit of the descriptor named by selector1.
 * On success, ZF is set (via CC_SRC) and the limit is returned; on any
 * failure ZF is cleared and 0 is returned.  Never faults. */
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* materialize the lazy flags so only ZF is modified below */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)        /* null selector */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segments skip the privilege check */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system descriptor: only these types carry a usable limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:   /* available 16-bit TSS */
        case 2:   /* LDT */
        case 3:   /* busy 16-bit TSS */
        case 9:   /* available 32-bit TSS */
        case 11:  /* busy 32-bit TSS */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;     /* ZF <- 0 */
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;              /* ZF <- 1 */
    return limit;
}
3316
/* LAR: load the access-rights bytes of the descriptor named by
 * selector1 (masked to 0x00f0ff00).  On success ZF is set (via
 * CC_SRC); on failure ZF is cleared and 0 is returned.  Never faults. */
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* materialize the lazy flags so only ZF is modified below */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)        /* null selector */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming code segments skip the privilege check */
        } else {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system descriptor: LAR accepts more types than LSL */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:   /* available 16-bit TSS */
        case 2:   /* LDT */
        case 3:   /* busy 16-bit TSS */
        case 4:   /* 16-bit call gate */
        case 5:   /* task gate */
        case 9:   /* available 32-bit TSS */
        case 11:  /* busy 32-bit TSS */
        case 12:  /* 32-bit call gate */
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;     /* ZF <- 0 */
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;              /* ZF <- 1 */
    return e2 & 0x00f0ff00;
}
3362
/* VERR: set ZF (via CC_SRC) if the segment named by selector1 is
 * readable at the current privilege level, clear ZF otherwise.
 * Never faults. */
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);  /* materialize lazy flags */
    if ((selector & 0xfffc) == 0)           /* null selector */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))                /* system segments fail */
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))            /* execute-only code segment */
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            /* non-conforming code: privilege check applies */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;        /* ZF <- 0 */
            return;
        }
    }
    CC_SRC = eflags | CC_Z;                 /* ZF <- 1 */
}
3395
/* VERW: set ZF (via CC_SRC) if the segment named by selector1 is
 * writable at the current privilege level, clear ZF otherwise.
 * Code segments are never writable.  Never faults. */
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);  /* materialize lazy flags */
    if ((selector & 0xfffc) == 0)           /* null selector */
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    if (!(e2 & DESC_S_MASK))                /* system segments fail */
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;                          /* code segments: not writable */
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {          /* read-only data segment */
        fail:
            CC_SRC = eflags & ~CC_Z;        /* ZF <- 0 */
            return;
        }
    }
    CC_SRC = eflags | CC_Z;                 /* ZF <- 1 */
}
3425
3426/* x87 FPU helpers */
3427
c31da136 3428static inline double floatx80_to_double(floatx80 a)
47c0143c
AJ
3429{
3430 union {
3431 float64 f64;
3432 double d;
3433 } u;
3434
c31da136 3435 u.f64 = floatx80_to_float64(a, &env->fp_status);
47c0143c
AJ
3436 return u.d;
3437}
3438
c31da136 3439static inline floatx80 double_to_floatx80(double a)
47c0143c
AJ
3440{
3441 union {
3442 float64 f64;
3443 double d;
3444 } u;
3445
3446 u.d = a;
c31da136 3447 return float64_to_floatx80(u.f64, &env->fp_status);
47c0143c
AJ
3448}
3449
eaa728ee
FB
3450static void fpu_set_exception(int mask)
3451{
3452 env->fpus |= mask;
3453 if (env->fpus & (~env->fpuc & FPUC_EM))
3454 env->fpus |= FPUS_SE | FPUS_B;
3455}
3456
c31da136 3457static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
eaa728ee 3458{
c31da136 3459 if (floatx80_is_zero(b)) {
eaa728ee 3460 fpu_set_exception(FPUS_ZE);
13822781 3461 }
c31da136 3462 return floatx80_div(a, b, &env->fp_status);
eaa728ee
FB
3463}
3464
/* Deliver a pending x87 exception: #MF when CR0.NE is set; otherwise
 * (system emulation only) assert the legacy FERR# line instead. */
static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3476
3477void helper_flds_FT0(uint32_t val)
3478{
3479 union {
3480 float32 f;
3481 uint32_t i;
3482 } u;
3483 u.i = val;
c31da136 3484 FT0 = float32_to_floatx80(u.f, &env->fp_status);
eaa728ee
FB
3485}
3486
3487void helper_fldl_FT0(uint64_t val)
3488{
3489 union {
3490 float64 f;
3491 uint64_t i;
3492 } u;
3493 u.i = val;
c31da136 3494 FT0 = float64_to_floatx80(u.f, &env->fp_status);
eaa728ee
FB
3495}
3496
3497void helper_fildl_FT0(int32_t val)
3498{
c31da136 3499 FT0 = int32_to_floatx80(val, &env->fp_status);
eaa728ee
FB
3500}
3501
3502void helper_flds_ST0(uint32_t val)
3503{
3504 int new_fpstt;
3505 union {
3506 float32 f;
3507 uint32_t i;
3508 } u;
3509 new_fpstt = (env->fpstt - 1) & 7;
3510 u.i = val;
c31da136 3511 env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
eaa728ee
FB
3512 env->fpstt = new_fpstt;
3513 env->fptags[new_fpstt] = 0; /* validate stack entry */
3514}
3515
3516void helper_fldl_ST0(uint64_t val)
3517{
3518 int new_fpstt;
3519 union {
3520 float64 f;
3521 uint64_t i;
3522 } u;
3523 new_fpstt = (env->fpstt - 1) & 7;
3524 u.i = val;
c31da136 3525 env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
eaa728ee
FB
3526 env->fpstt = new_fpstt;
3527 env->fptags[new_fpstt] = 0; /* validate stack entry */
3528}
3529
3530void helper_fildl_ST0(int32_t val)
3531{
3532 int new_fpstt;
3533 new_fpstt = (env->fpstt - 1) & 7;
c31da136 3534 env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
eaa728ee
FB
3535 env->fpstt = new_fpstt;
3536 env->fptags[new_fpstt] = 0; /* validate stack entry */
3537}
3538
3539void helper_fildll_ST0(int64_t val)
3540{
3541 int new_fpstt;
3542 new_fpstt = (env->fpstt - 1) & 7;
c31da136 3543 env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
eaa728ee
FB
3544 env->fpstt = new_fpstt;
3545 env->fptags[new_fpstt] = 0; /* validate stack entry */
3546}
3547
3548uint32_t helper_fsts_ST0(void)
3549{
3550 union {
3551 float32 f;
3552 uint32_t i;
3553 } u;
c31da136 3554 u.f = floatx80_to_float32(ST0, &env->fp_status);
eaa728ee
FB
3555 return u.i;
3556}
3557
3558uint64_t helper_fstl_ST0(void)
3559{
3560 union {
3561 float64 f;
3562 uint64_t i;
3563 } u;
c31da136 3564 u.f = floatx80_to_float64(ST0, &env->fp_status);
eaa728ee
FB
3565 return u.i;
3566}
3567
3568int32_t helper_fist_ST0(void)
3569{
3570 int32_t val;
c31da136 3571 val = floatx80_to_int32(ST0, &env->fp_status);
eaa728ee
FB
3572 if (val != (int16_t)val)
3573 val = -32768;
3574 return val;
3575}
3576
3577int32_t helper_fistl_ST0(void)
3578{
3579 int32_t val;
c31da136 3580 val = floatx80_to_int32(ST0, &env->fp_status);
eaa728ee
FB
3581 return val;
3582}
3583
3584int64_t helper_fistll_ST0(void)
3585{
3586 int64_t val;
c31da136 3587 val = floatx80_to_int64(ST0, &env->fp_status);
eaa728ee
FB
3588 return val;
3589}
3590
3591int32_t helper_fistt_ST0(void)
3592{
3593 int32_t val;
c31da136 3594 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
eaa728ee
FB
3595 if (val != (int16_t)val)
3596 val = -32768;
3597 return val;
3598}
3599
3600int32_t helper_fisttl_ST0(void)
3601{
3602 int32_t val;
c31da136 3603 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
eaa728ee
FB
3604 return val;
3605}
3606
3607int64_t helper_fisttll_ST0(void)
3608{
3609 int64_t val;
c31da136 3610 val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
eaa728ee
FB
3611 return val;
3612}
3613
3614void helper_fldt_ST0(target_ulong ptr)
3615{
3616 int new_fpstt;
3617 new_fpstt = (env->fpstt - 1) & 7;
3618 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3619 env->fpstt = new_fpstt;
3620 env->fptags[new_fpstt] = 0; /* validate stack entry */
3621}
3622
3623void helper_fstt_ST0(target_ulong ptr)
3624{
3625 helper_fstt(ST0, ptr);
3626}
3627
3628void helper_fpush(void)
3629{
3630 fpush();
3631}
3632
3633void helper_fpop(void)
3634{
3635 fpop();
3636}
3637
3638void helper_fdecstp(void)
3639{
3640 env->fpstt = (env->fpstt - 1) & 7;
3641 env->fpus &= (~0x4700);
3642}
3643
3644void helper_fincstp(void)
3645{
3646 env->fpstt = (env->fpstt + 1) & 7;
3647 env->fpus &= (~0x4700);
3648}
3649
3650/* FPU move */
3651
3652void helper_ffree_STN(int st_index)
3653{
3654 env->fptags[(env->fpstt + st_index) & 7] = 1;
3655}
3656
3657void helper_fmov_ST0_FT0(void)
3658{
3659 ST0 = FT0;
3660}
3661
3662void helper_fmov_FT0_STN(int st_index)
3663{
3664 FT0 = ST(st_index);
3665}
3666
3667void helper_fmov_ST0_STN(int st_index)
3668{
3669 ST0 = ST(st_index);
3670}
3671
3672void helper_fmov_STN_ST0(int st_index)
3673{
3674 ST(st_index) = ST0;
3675}
3676
3677void helper_fxchg_ST0_STN(int st_index)
3678{
c31da136 3679 floatx80 tmp;
eaa728ee
FB
3680 tmp = ST(st_index);
3681 ST(st_index) = ST0;
3682 ST0 = tmp;
3683}
3684
3685/* FPU operations */
3686
3687static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3688
3689void helper_fcom_ST0_FT0(void)
3690{
3691 int ret;
3692
c31da136 3693 ret = floatx80_compare(ST0, FT0, &env->fp_status);
eaa728ee 3694 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
eaa728ee
FB
3695}
3696
3697void helper_fucom_ST0_FT0(void)
3698{
3699 int ret;
3700
c31da136 3701 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
eaa728ee 3702 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
eaa728ee
FB
3703}
3704
3705static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3706
3707void helper_fcomi_ST0_FT0(void)
3708{
3709 int eflags;
3710 int ret;
3711
c31da136 3712 ret = floatx80_compare(ST0, FT0, &env->fp_status);
a7812ae4 3713 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
3714 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3715 CC_SRC = eflags;
eaa728ee
FB
3716}
3717
3718void helper_fucomi_ST0_FT0(void)
3719{
3720 int eflags;
3721 int ret;
3722
c31da136 3723 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
a7812ae4 3724 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
3725 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3726 CC_SRC = eflags;
eaa728ee
FB
3727}
3728
3729void helper_fadd_ST0_FT0(void)
3730{
c31da136 3731 ST0 = floatx80_add(ST0, FT0, &env->fp_status);
eaa728ee
FB
3732}
3733
3734void helper_fmul_ST0_FT0(void)
3735{
c31da136 3736 ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
eaa728ee
FB
3737}
3738
3739void helper_fsub_ST0_FT0(void)
3740{
c31da136 3741 ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
eaa728ee
FB
3742}
3743
3744void helper_fsubr_ST0_FT0(void)
3745{
c31da136 3746 ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
eaa728ee
FB
3747}
3748
3749void helper_fdiv_ST0_FT0(void)
3750{
3751 ST0 = helper_fdiv(ST0, FT0);
3752}
3753
3754void helper_fdivr_ST0_FT0(void)
3755{
3756 ST0 = helper_fdiv(FT0, ST0);
3757}
3758
3759/* fp operations between STN and ST0 */
3760
3761void helper_fadd_STN_ST0(int st_index)
3762{
c31da136 3763 ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
eaa728ee
FB
3764}
3765
3766void helper_fmul_STN_ST0(int st_index)
3767{
c31da136 3768 ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
eaa728ee
FB
3769}
3770
3771void helper_fsub_STN_ST0(int st_index)
3772{
c31da136 3773 ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
eaa728ee
FB
3774}
3775
3776void helper_fsubr_STN_ST0(int st_index)
3777{
c31da136 3778 ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
eaa728ee
FB
3779}
3780
3781void helper_fdiv_STN_ST0(int st_index)
3782{
c31da136 3783 floatx80 *p;
eaa728ee
FB
3784 p = &ST(st_index);
3785 *p = helper_fdiv(*p, ST0);
3786}
3787
3788void helper_fdivr_STN_ST0(int st_index)
3789{
c31da136 3790 floatx80 *p;
eaa728ee
FB
3791 p = &ST(st_index);
3792 *p = helper_fdiv(ST0, *p);
3793}
3794
3795/* misc FPU operations */
3796void helper_fchs_ST0(void)
3797{
c31da136 3798 ST0 = floatx80_chs(ST0);
eaa728ee
FB
3799}
3800
3801void helper_fabs_ST0(void)
3802{
c31da136 3803 ST0 = floatx80_abs(ST0);
eaa728ee
FB
3804}
3805
3806void helper_fld1_ST0(void)
3807{
66fcf8ff 3808 ST0 = floatx80_one;
eaa728ee
FB
3809}
3810
3811void helper_fldl2t_ST0(void)
3812{
66fcf8ff 3813 ST0 = floatx80_l2t;
eaa728ee
FB
3814}
3815
3816void helper_fldl2e_ST0(void)
3817{
66fcf8ff 3818 ST0 = floatx80_l2e;
eaa728ee
FB
3819}
3820
3821void helper_fldpi_ST0(void)
3822{
66fcf8ff 3823 ST0 = floatx80_pi;
eaa728ee
FB
3824}
3825
3826void helper_fldlg2_ST0(void)
3827{
66fcf8ff 3828 ST0 = floatx80_lg2;
eaa728ee
FB
3829}
3830
3831void helper_fldln2_ST0(void)
3832{
66fcf8ff 3833 ST0 = floatx80_ln2;
eaa728ee
FB
3834}
3835
3836void helper_fldz_ST0(void)
3837{
66fcf8ff 3838 ST0 = floatx80_zero;
eaa728ee
FB
3839}
3840
3841void helper_fldz_FT0(void)
3842{
66fcf8ff 3843 FT0 = floatx80_zero;
eaa728ee
FB
3844}
3845
3846uint32_t helper_fnstsw(void)
3847{
3848 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3849}
3850
3851uint32_t helper_fnstcw(void)
3852{
3853 return env->fpuc;
3854}
3855
3856static void update_fp_status(void)
3857{
3858 int rnd_type;
3859
3860 /* set rounding mode */
3861 switch(env->fpuc & RC_MASK) {
3862 default:
3863 case RC_NEAR:
3864 rnd_type = float_round_nearest_even;
3865 break;
3866 case RC_DOWN:
3867 rnd_type = float_round_down;
3868 break;
3869 case RC_UP:
3870 rnd_type = float_round_up;
3871 break;
3872 case RC_CHOP:
3873 rnd_type = float_round_to_zero;
3874 break;
3875 }
3876 set_float_rounding_mode(rnd_type, &env->fp_status);
eaa728ee
FB
3877 switch((env->fpuc >> 8) & 3) {
3878 case 0:
3879 rnd_type = 32;
3880 break;
3881 case 2:
3882 rnd_type = 64;
3883 break;
3884 case 3:
3885 default:
3886 rnd_type = 80;
3887 break;
3888 }
3889 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
eaa728ee
FB
3890}
3891
3892void helper_fldcw(uint32_t val)
3893{
3894 env->fpuc = val;
3895 update_fp_status();
3896}
3897
3898void helper_fclex(void)
3899{
3900 env->fpus &= 0x7f00;
3901}
3902
3903void helper_fwait(void)
3904{
3905 if (env->fpus & FPUS_SE)
3906 fpu_raise_exception();
eaa728ee
FB
3907}
3908
3909void helper_fninit(void)
3910{
3911 env->fpus = 0;
3912 env->fpstt = 0;
3913 env->fpuc = 0x37f;
3914 env->fptags[0] = 1;
3915 env->fptags[1] = 1;
3916 env->fptags[2] = 1;
3917 env->fptags[3] = 1;
3918 env->fptags[4] = 1;
3919 env->fptags[5] = 1;
3920 env->fptags[6] = 1;
3921 env->fptags[7] = 1;
3922}
3923
3924/* BCD ops */
3925
3926void helper_fbld_ST0(target_ulong ptr)
3927{
c31da136 3928 floatx80 tmp;
eaa728ee
FB
3929 uint64_t val;
3930 unsigned int v;
3931 int i;
3932
3933 val = 0;
3934 for(i = 8; i >= 0; i--) {
3935 v = ldub(ptr + i);
3936 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3937 }
c31da136 3938 tmp = int64_to_floatx80(val, &env->fp_status);
788e7336 3939 if (ldub(ptr + 9) & 0x80) {
c31da136 3940 floatx80_chs(tmp);
788e7336 3941 }
eaa728ee
FB
3942 fpush();
3943 ST0 = tmp;
3944}
3945
/* FBSTP: store ST0 at ptr as an 18-digit packed-BCD integer — nine
 * digit bytes (two decimal digits each, least significant first) plus
 * a sign byte at offset 9.  ST0 is rounded with the current mode. */
void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx80_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);     /* sign byte: negative */
        val = -val;
    } else {
        stb(mem_end, 0x00);     /* sign byte: positive */
    }
    /* emit two decimal digits per byte until the value is exhausted */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill any remaining digit bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
3973
3974void helper_f2xm1(void)
3975{
c31da136 3976 double val = floatx80_to_double(ST0);
a2c9ed3c 3977 val = pow(2.0, val) - 1.0;
c31da136 3978 ST0 = double_to_floatx80(val);
eaa728ee
FB
3979}
3980
3981void helper_fyl2x(void)
3982{
c31da136 3983 double fptemp = floatx80_to_double(ST0);
eaa728ee 3984
eaa728ee 3985 if (fptemp>0.0){
a2c9ed3c 3986 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
c31da136
AJ
3987 fptemp *= floatx80_to_double(ST1);
3988 ST1 = double_to_floatx80(fptemp);
eaa728ee
FB
3989 fpop();
3990 } else {
3991 env->fpus &= (~0x4700);
3992 env->fpus |= 0x400;
3993 }
3994}
3995
3996void helper_fptan(void)
3997{
c31da136 3998 double fptemp = floatx80_to_double(ST0);
eaa728ee 3999
eaa728ee
FB
4000 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4001 env->fpus |= 0x400;
4002 } else {
a2c9ed3c 4003 fptemp = tan(fptemp);
c31da136 4004 ST0 = double_to_floatx80(fptemp);
eaa728ee 4005 fpush();
c31da136 4006 ST0 = floatx80_one;
eaa728ee
FB
4007 env->fpus &= (~0x400); /* C2 <-- 0 */
4008 /* the above code is for |arg| < 2**52 only */
4009 }
4010}
4011
4012void helper_fpatan(void)
4013{
a2c9ed3c 4014 double fptemp, fpsrcop;
eaa728ee 4015
c31da136
AJ
4016 fpsrcop = floatx80_to_double(ST1);
4017 fptemp = floatx80_to_double(ST0);
4018 ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
eaa728ee
FB
4019 fpop();
4020}
4021
/* FXTRACT: decompose ST0 into exponent and significand.  ST0 is
 * replaced by the unbiased exponent and the significand (scaled into
 * [1,2)) is pushed on top.  A zero input produces -inf as the
 * exponent and raises divide-by-zero. */
void helper_fxtract(void)
{
    CPU_LDoubleU temp;

    temp.d = ST0;

    if (floatx80_is_zero(ST0)) {
        /* Easy way to generate -inf and raising division by 0 exception */
        ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
        fpush();
        ST0 = temp.d;                    /* significand stays +/-0 */
    } else {
        int expdif;

        expdif = EXPD(temp) - EXPBIAS;   /* remove the exponent bias */
        /*DP exponent bias*/
        ST0 = int32_to_floatx80(expdif, &env->fp_status);
        fpush();
        BIASEXPONENT(temp);              /* force exponent to the bias value */
        ST0 = temp.d;
    }
}
4044
/* FPREM1: IEEE partial remainder ST0 <- ST0 REM ST1 (quotient rounded
 * to nearest).  Computed in host double precision as an approximation.
 * When the exponent gap is < 53 the reduction completes in one step
 * and C0/C3/C1 receive the low quotient bits; otherwise a partial
 * reduction is done and C2 is set to request another iteration. */
void helper_fprem1(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);   /* raw exponent distance */

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        st0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* too far apart: reduce by a power-of-two scaled quotient and
           leave C2 set so software iterates */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}
4102
/* FPREM: x87 partial remainder ST0 <- ST0 % ST1 with the quotient
 * truncated toward zero (unlike FPREM1's round-to-nearest).  Computed
 * in host double precision as an approximation.  C0/C3/C1 receive the
 * low quotient bits on completion; C2 set means "iterate again". */
void helper_fprem(void)
{
    double st0, st1, dblq, fpsrcop, fptemp;
    CPU_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    st0 = floatx80_to_double(ST0);
    st1 = floatx80_to_double(ST1);

    if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
        ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = st0;
    fptemp = st1;
    fpsrcop1.d = ST0;
    fptemp1.d = ST1;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);   /* raw exponent distance */

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        st0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
            q = (signed long long int)(-dblq);
        else
            q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
    } else {
        /* partial reduction step, amount taken from the AMD docs */
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (st0 / st1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
            -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        st0 -= (st1 * fpsrcop * fptemp);
    }
    ST0 = double_to_floatx80(st0);
}
4161
4162void helper_fyl2xp1(void)
4163{
c31da136 4164 double fptemp = floatx80_to_double(ST0);
eaa728ee 4165
eaa728ee
FB
4166 if ((fptemp+1.0)>0.0) {
4167 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
c31da136
AJ
4168 fptemp *= floatx80_to_double(ST1);
4169 ST1 = double_to_floatx80(fptemp);
eaa728ee
FB
4170 fpop();
4171 } else {
4172 env->fpus &= (~0x4700);
4173 env->fpus |= 0x400;
4174 }
4175}
4176
4177void helper_fsqrt(void)
4178{
c31da136 4179 if (floatx80_is_neg(ST0)) {
eaa728ee
FB
4180 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4181 env->fpus |= 0x400;
4182 }
c31da136 4183 ST0 = floatx80_sqrt(ST0, &env->fp_status);
eaa728ee
FB
4184}
4185
4186void helper_fsincos(void)
4187{
c31da136 4188 double fptemp = floatx80_to_double(ST0);
eaa728ee 4189
eaa728ee
FB
4190 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4191 env->fpus |= 0x400;
4192 } else {
c31da136 4193 ST0 = double_to_floatx80(sin(fptemp));
eaa728ee 4194 fpush();
c31da136 4195 ST0 = double_to_floatx80(cos(fptemp));
eaa728ee
FB
4196 env->fpus &= (~0x400); /* C2 <-- 0 */
4197 /* the above code is for |arg| < 2**63 only */
4198 }
4199}
4200
4201void helper_frndint(void)
4202{
c31da136 4203 ST0 = floatx80_round_to_int(ST0, &env->fp_status);
eaa728ee
FB
4204}
4205
4206void helper_fscale(void)
4207{
c31da136 4208 if (floatx80_is_any_nan(ST1)) {
be1c17c7
AJ
4209 ST0 = ST1;
4210 } else {
c31da136
AJ
4211 int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
4212 ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
be1c17c7 4213 }
eaa728ee
FB
4214}
4215
4216void helper_fsin(void)
4217{
c31da136 4218 double fptemp = floatx80_to_double(ST0);
eaa728ee 4219
eaa728ee
FB
4220 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4221 env->fpus |= 0x400;
4222 } else {
c31da136 4223 ST0 = double_to_floatx80(sin(fptemp));
eaa728ee
FB
4224 env->fpus &= (~0x400); /* C2 <-- 0 */
4225 /* the above code is for |arg| < 2**53 only */
4226 }
4227}
4228
4229void helper_fcos(void)
4230{
c31da136 4231 double fptemp = floatx80_to_double(ST0);
eaa728ee 4232
eaa728ee
FB
4233 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4234 env->fpus |= 0x400;
4235 } else {
c31da136 4236 ST0 = double_to_floatx80(cos(fptemp));
eaa728ee
FB
4237 env->fpus &= (~0x400); /* C2 <-- 0 */
4238 /* the above code is for |arg5 < 2**63 only */
4239 }
4240}
4241
/* FXAM: classify ST0 into the C3/C2/C0 condition codes (NaN,
 * infinity, zero, denormal or normal), with the sign in C1. */
void helper_fxam_ST0(void)
{
    CPU_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 (sign bit) */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN, split on the mantissa */
        if (MANTD(temp) == 0x8000000000000000ULL)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;     /* normal finite number */
    }
}
4269
/* FSTENV/FNSTENV: store the FPU environment (control, status and tag
 * words plus zeroed instruction/operand pointers) at ptr.  data32
 * selects the 32-bit (28-byte) vs 16-bit (14-byte) layout.  The
 * 2-bit-per-register tag word is recomputed from the register
 * contents rather than from cached tags alone. */
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU_LDoubleU tmp;

    /* status word with the live TOP field in bits 13:11 */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;                 /* empty */
        } else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
                       || (mant & (1LL << 63)) == 0
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
            /* otherwise 0: valid */
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
4317
/* FLDENV: reload the control, status and tag words from the
 * environment image at ptr.  data32 selects the 32-bit (28-byte) vs
 * 16-bit (14-byte) layout; only the word-sized fields are read. */
void helper_fldenv(target_ulong ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;      /* TOP lives in bits 13:11 */
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        /* only "empty" (tag value 3) is tracked internally */
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
4339
4340void helper_fsave(target_ulong ptr, int data32)
4341{
c31da136 4342 floatx80 tmp;
eaa728ee
FB
4343 int i;
4344
4345 helper_fstenv(ptr, data32);
4346
4347 ptr += (14 << data32);
4348 for(i = 0;i < 8; i++) {
4349 tmp = ST(i);
4350 helper_fstt(tmp, ptr);
4351 ptr += 10;
4352 }
4353
4354 /* fninit */
4355 env->fpus = 0;
4356 env->fpstt = 0;
4357 env->fpuc = 0x37f;
4358 env->fptags[0] = 1;
4359 env->fptags[1] = 1;
4360 env->fptags[2] = 1;
4361 env->fptags[3] = 1;
4362 env->fptags[4] = 1;
4363 env->fptags[5] = 1;
4364 env->fptags[6] = 1;
4365 env->fptags[7] = 1;
4366}
4367
4368void helper_frstor(target_ulong ptr, int data32)
4369{
c31da136 4370 floatx80 tmp;
eaa728ee
FB
4371 int i;
4372
4373 helper_fldenv(ptr, data32);
4374 ptr += (14 << data32);
4375
4376 for(i = 0;i < 8; i++) {
4377 tmp = helper_fldt(ptr);
4378 ST(i) = tmp;
4379 ptr += 10;
4380 }
4381}
4382
/* FXSAVE: store the x87/MMX/SSE state in the 512-byte FXSAVE layout
 * at ptr (which must be 16-byte aligned, else #GP).  data64 selects
 * the 64-bit pointer-field format.  XMM state is stored only when
 * CR4.OSFXSR is set and "fast FXSAVE" (EFER.FFXSR at CPL0 in long
 * mode) does not apply. */
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff);  /* FXSAVE stores a 1-bit-per-reg "valid" mask */
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel */
    }

    /* the eight x87 registers, 16 bytes apart */
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}
4443
/* FXRSTOR: reload the x87/MMX/SSE state from the 512-byte FXSAVE
 * image at ptr (which must be 16-byte aligned, else #GP).  XMM state
 * is restored only when CR4.OSFXSR is set and "fast FXRSTOR"
 * (EFER.FFXSR at CPL0 in long mode) does not apply. */
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    floatx80 tmp;
    target_ulong addr;

    /* The operand must be 16 byte aligned */
    if (ptr & 0xf) {
        raise_exception(EXCP0D_GPF);
    }

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;  /* TOP lives in bits 13:11 */
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;                  /* stored mask is "valid", tags are "empty" */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    /* the eight x87 registers, 16 bytes apart */
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
            || (env->hflags & HF_CPL_MASK)
            || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}
4493
c31da136 4494void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
eaa728ee 4495{
c31da136 4496 CPU_LDoubleU temp;
eaa728ee
FB
4497
4498 temp.d = f;
4499 *pmant = temp.l.lower;
4500 *pexp = temp.l.upper;
4501}
4502
c31da136 4503floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
eaa728ee 4504{
c31da136 4505 CPU_LDoubleU temp;
eaa728ee
FB
4506
4507 temp.l.upper = upper;
4508 temp.l.lower = mant;
4509 return temp.d;
4510}
eaa728ee
FB
4511
4512#ifdef TARGET_X86_64
4513
4514//#define DEBUG_MULDIV
4515
/* 128-bit add in two 64-bit halves: *phigh:*plow += b:a. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t sum = *plow + a;

    /* unsigned wrap-around of the low word means a carry out */
    if (sum < a) {
        (*phigh)++;
    }
    *plow = sum;
    *phigh += b;
}
4524
/* 128-bit two's-complement negation: invert both halves, then add 1
   with carry propagation (via add128). */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}
4531
/* 128/64 unsigned division: divides *phigh:*plow by b, leaving the
   quotient in *plow and the remainder in *phigh.
   Returns TRUE if the quotient does not fit in 64 bits (overflow);
   behaviour for b == 0 is the caller's responsibility. */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t quot, rem;
    int i;

    quot = *plow;
    rem = *phigh;

    if (rem == 0) {
        /* narrow dividend: a plain 64/64 divide suffices */
        *plow = quot / b;
        *phigh = quot % b;
        return 0;
    }

    if (rem >= b) {
        return 1;               /* quotient would exceed 64 bits */
    }

    /* XXX: use a better algorithm */
    /* classic restoring shift-subtract long division, one quotient
       bit per iteration */
    for (i = 0; i < 64; i++) {
        int top = rem >> 63;
        rem = (rem << 1) | (quot >> 63);
        if (top || rem >= b) {
            rem -= b;
            quot = (quot << 1) | 1;
        } else {
            quot = quot << 1;
        }
    }
#if defined(DEBUG_MULDIV)
    printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
           *phigh, *plow, b, quot, rem);
#endif
    *plow = quot;
    *phigh = rem;
    return 0;
}
4569
/* 128/64 signed division built on div64: returns TRUE if the signed
   quotient overflows 64 bits. Quotient ends up in *plow, remainder
   (carrying the dividend's sign) in *phigh. */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int neg_num, neg_den;

    /* reduce to unsigned magnitudes, remembering both signs */
    neg_num = ((int64_t)*phigh < 0);
    if (neg_num)
        neg128(plow, phigh);
    neg_den = (b < 0);
    if (neg_den)
        b = -b;

    if (div64(plow, phigh, b) != 0)
        return 1;

    if (neg_num ^ neg_den) {
        /* negative quotient: magnitude may reach 2^63 exactly */
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else if (*plow >= (1ULL << 63)) {
        /* positive quotient must stay below 2^63 */
        return 1;
    }

    /* remainder takes the sign of the dividend */
    if (neg_num)
        *phigh = - *phigh;
    return 0;
}
4594
4595void helper_mulq_EAX_T0(target_ulong t0)
4596{
4597 uint64_t r0, r1;
4598
4599 mulu64(&r0, &r1, EAX, t0);
4600 EAX = r0;
4601 EDX = r1;
4602 CC_DST = r0;
4603 CC_SRC = r1;
4604}
4605
4606void helper_imulq_EAX_T0(target_ulong t0)
4607{
4608 uint64_t r0, r1;
4609
4610 muls64(&r0, &r1, EAX, t0);
4611 EAX = r0;
4612 EDX = r1;
4613 CC_DST = r0;
4614 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4615}
4616
4617target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4618{
4619 uint64_t r0, r1;
4620
4621 muls64(&r0, &r1, t0, t1);
4622 CC_DST = r0;
4623 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4624 return r0;
4625}
4626
4627void helper_divq_EAX(target_ulong t0)
4628{
4629 uint64_t r0, r1;
4630 if (t0 == 0) {
4631 raise_exception(EXCP00_DIVZ);
4632 }
4633 r0 = EAX;
4634 r1 = EDX;
4635 if (div64(&r0, &r1, t0))
4636 raise_exception(EXCP00_DIVZ);
4637 EAX = r0;
4638 EDX = r1;
4639}
4640
4641void helper_idivq_EAX(target_ulong t0)
4642{
4643 uint64_t r0, r1;
4644 if (t0 == 0) {
4645 raise_exception(EXCP00_DIVZ);
4646 }
4647 r0 = EAX;
4648 r1 = EDX;
4649 if (idiv64(&r0, &r1, t0))
4650 raise_exception(EXCP00_DIVZ);
4651 EAX = r0;
4652 EDX = r1;
4653}
4654#endif
4655
/* Put the virtual CPU into the halted state and leave the execution
   loop with EXCP_HLT. */
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}
4663
94451178
FB
4664void helper_hlt(int next_eip_addend)
4665{
4666 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4667 EIP += next_eip_addend;
4668
4669 do_hlt();
4670}
4671
eaa728ee
FB
/* MONITOR instruction: only ECX == 0 (no extensions) is accepted;
   the monitored address itself is not recorded (ptr unused). */
void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}
4679
/* MWAIT instruction: validated like MONITOR (ECX must be 0), may be
   intercepted by SVM; on a uniprocessor it degenerates to HLT. */
void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}
4695
/* Leave the cpu loop with EXCP_DEBUG so the debugger/gdbstub can take
   over. Does not return. */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}
4701
a2397807
JK
/* Clear the resume flag (RF) in EFLAGS. */
void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}
4706
eaa728ee
FB
/* Raise a software interrupt (is_int = 1, no error code). */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
4711
/* Raise a CPU exception from generated code. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4716
/* CLI: clear the interrupt-enable flag. */
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}
4721
/* STI: set the interrupt-enable flag. */
void helper_sti(void)
{
    env->eflags |= IF_MASK;
}
4726
4727#if 0
4728/* vm86plus instructions */
4729void helper_cli_vm(void)
4730{
4731 env->eflags &= ~VIF_MASK;
4732}
4733
4734void helper_sti_vm(void)
4735{
4736 env->eflags |= VIF_MASK;
4737 if (env->eflags & VIP_MASK) {
4738 raise_exception(EXCP0D_GPF);
4739 }
4740}
4741#endif
4742
/* Inhibit interrupts for one instruction (STI/MOV SS shadow). */
void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}
4747
/* End the one-instruction interrupt-inhibit window. */
void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
4752
4753void helper_boundw(target_ulong a0, int v)
4754{
4755 int low, high;
4756 low = ldsw(a0);
4757 high = ldsw(a0 + 2);
4758 v = (int16_t)v;
4759 if (v < low || v > high) {
4760 raise_exception(EXCP05_BOUND);
4761 }
eaa728ee
FB
4762}
4763
4764void helper_boundl(target_ulong a0, int v)
4765{
4766 int low, high;
4767 low = ldl(a0);
4768 high = ldl(a0 + 4);
4769 if (v < low || v > high) {
4770 raise_exception(EXCP05_BOUND);
4771 }
eaa728ee
FB
4772}
4773
eaa728ee
FB
4774#if !defined(CONFIG_USER_ONLY)
4775
4776#define MMUSUFFIX _mmu
4777
4778#define SHIFT 0
4779#include "softmmu_template.h"
4780
4781#define SHIFT 1
4782#include "softmmu_template.h"
4783
4784#define SHIFT 2
4785#include "softmmu_template.h"
4786
4787#define SHIFT 3
4788#include "softmmu_template.h"
4789
4790#endif
4791
d9957a8b 4792#if !defined(CONFIG_USER_ONLY)
eaa728ee
FB
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                /* resynchronize the CPU state with the faulting host PC
                   before delivering the exception */
                cpu_restore_state(tb, env, pc);
            }
        }
        /* deliver the page fault set up by the MMU handler; this longjmps
           out, so the env restore below is only reached on success */
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
d9957a8b 4825#endif
eaa728ee
FB
4826
4827/* Secure Virtual Machine helpers */
4828
eaa728ee
FB
4829#if defined(CONFIG_USER_ONLY)
4830
/* In user-mode emulation (CONFIG_USER_ONLY) SVM is not available:
   all hypervisor helpers are no-ops. */
void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
4866#else
4867
/* Store one segment-cache entry into a vmcb_seg at guest-physical
   addr. The 12-bit VMCB attrib field is repacked from the internal
   flags layout (descriptor bits 8..15 and 20..23). */
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
4880
/* Load one segment-cache entry from a vmcb_seg at guest-physical
   addr; inverse of svm_save_seg's attrib packing. */
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
4891
/* Load a vmcb_seg and install it into the CPU's segment cache via
   cpu_x86_load_seg_cache (which also updates derived hflags). */
static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
4900
/* VMRUN: enter the guest described by the VMCB whose physical address
   is in (E/R)AX. Saves host state into the hsave page, loads guest
   state and intercept bitmaps from the VMCB, and optionally injects a
   pending event. aflag == 2 means a 64-bit address size. */
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    /* the saved host rip points past the VMRUN instruction */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* virtual interrupt masking: remember the host IF state */
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
5073
/* VMMCALL: only meaningful when intercepted by the hypervisor;
   otherwise it raises #UD. */
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
5079
/* VMLOAD: load the "hidden" processor state (FS/GS/TR/LDTR plus
   syscall/sysenter MSRs) from the VMCB addressed by (E/R)AX.
   aflag == 2 means a 64-bit address size. */
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
5114
/* VMSAVE: mirror of VMLOAD — store the hidden processor state
   (FS/GS/TR/LDTR plus syscall/sysenter MSRs) into the VMCB addressed
   by (E/R)AX. aflag == 2 means a 64-bit address size. */
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
5149
872929aa
FB
/* STGI: set the global interrupt flag (GIF). */
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
5155
/* CLGI: clear the global interrupt flag (GIF). */
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
5161
eaa728ee
FB
/* SKINIT: secure init is not emulated; after the intercept check it
   always raises #UD. */
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
5168
/* INVLPGA: invalidate the TLB entry for the virtual address in
   (E/R)AX; the ASID in ECX is currently ignored. aflag == 2 means a
   64-bit address size. */
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
5183
/* Check whether the SVM guest has requested an intercept for the
   given exit code; if so, #VMEXIT (helper_vmexit does not return).
   MSR accesses additionally consult the MSR permission bitmap, where
   param selects the read (0) or write (1) bit of the pair. */
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    /* fast path: not running under SVM */
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            /* map the MSR number to a (byte, bit) pair in the 2-bits-
               per-MSR permission map; the three architectural MSR
               ranges each occupy a 2K region of the bitmap */
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                /* MSR outside the mapped ranges always exits */
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
5251
/* Check the SVM I/O permission bitmap for an IN/OUT access to port;
   on an intercepted access, record the next EIP in exit_info_2 and
   #VMEXIT. The operand size (bits 4..6 of param) widens the bit mask
   so multi-byte accesses test all covered ports. */
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
5267
/* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: leave guest mode. Saves the guest state back into the
   VMCB (with exit_code/exit_info_1 and the pending event_inj copied
   to exit_int_info), reloads the host state from the hsave page, and
   leaves the cpu loop. Does not return. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* record (and drop) a pending STI/MOV SS interrupt shadow */
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
5405
5406#endif
5407
5408/* MMX/SSE */
5409/* XXX: optimize by storing fptt and fptags in the static cpu state */
5410void helper_enter_mmx(void)
5411{
5412 env->fpstt = 0;
5413 *(uint32_t *)(env->fptags) = 0;
5414 *(uint32_t *)(env->fptags + 4) = 0;
5415}
5416
5417void helper_emms(void)
5418{
5419 /* set to empty state */
5420 *(uint32_t *)(env->fptags) = 0x01010101;
5421 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5422}
5423
5424/* XXX: suppress */
a7812ae4 5425void helper_movq(void *d, void *s)
eaa728ee 5426{
a7812ae4 5427 *(uint64_t *)d = *(uint64_t *)s;
eaa728ee
FB
5428}
5429
5430#define SHIFT 0
5431#include "ops_sse.h"
5432
5433#define SHIFT 1
5434#include "ops_sse.h"
5435
5436#define SHIFT 0
5437#include "helper_template.h"
5438#undef SHIFT
5439
5440#define SHIFT 1
5441#include "helper_template.h"
5442#undef SHIFT
5443
5444#define SHIFT 2
5445#include "helper_template.h"
5446#undef SHIFT
5447
5448#ifdef TARGET_X86_64
5449
5450#define SHIFT 3
5451#include "helper_template.h"
5452#undef SHIFT
5453
5454#endif
5455
5456/* bit operations */
5457target_ulong helper_bsf(target_ulong t0)
5458{
5459 int count;
5460 target_ulong res;
5461
5462 res = t0;
5463 count = 0;
5464 while ((res & 1) == 0) {
5465 count++;
5466 res >>= 1;
5467 }
5468 return count;
5469}
5470
31501a71 5471target_ulong helper_lzcnt(target_ulong t0, int wordsize)
eaa728ee
FB
5472{
5473 int count;
5474 target_ulong res, mask;
31501a71
AP
5475
5476 if (wordsize > 0 && t0 == 0) {
5477 return wordsize;
5478 }
eaa728ee
FB
5479 res = t0;
5480 count = TARGET_LONG_BITS - 1;
5481 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5482 while ((res & mask) == 0) {
5483 count--;
5484 res <<= 1;
5485 }
31501a71
AP
5486 if (wordsize > 0) {
5487 return wordsize - 1 - count;
5488 }
eaa728ee
FB
5489 return count;
5490}
5491
31501a71
AP
/* Bit Scan Reverse: bit index of the most significant set bit of t0.
   The translator guarantees t0 != 0 (BSR with a zero source leaves the
   destination architecturally undefined). */
target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
eaa728ee
FB
5496
/* CC_OP_EFLAGS: the flags are already materialized in CC_SRC. */
static int compute_all_eflags(void)
{
    return CC_SRC;
}
5501
/* CC_OP_EFLAGS: extract just the carry flag from the stored CC_SRC. */
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
5506
a7812ae4
PB
5507uint32_t helper_cc_compute_all(int op)
5508{
5509 switch (op) {
5510 default: /* should never happen */ return 0;
eaa728ee 5511
a7812ae4 5512 case CC_OP_EFLAGS: return compute_all_eflags();
eaa728ee 5513
a7812ae4
PB
5514 case CC_OP_MULB: return compute_all_mulb();
5515 case CC_OP_MULW: return compute_all_mulw();
5516 case CC_OP_MULL: return compute_all_mull();
eaa728ee 5517
a7812ae4
PB
5518 case CC_OP_ADDB: return compute_all_addb();
5519 case CC_OP_ADDW: return compute_all_addw();
5520 case CC_OP_ADDL: return compute_all_addl();
eaa728ee 5521
a7812ae4
PB
5522 case CC_OP_ADCB: return compute_all_adcb();
5523 case CC_OP_ADCW: return compute_all_adcw();
5524 case CC_OP_ADCL: return compute_all_adcl();
eaa728ee 5525
a7812ae4
PB
5526 case CC_OP_SUBB: return compute_all_subb();
5527 case CC_OP_SUBW: return compute_all_subw();
5528 case CC_OP_SUBL: return compute_all_subl();
eaa728ee 5529
a7812ae4
PB
5530 case CC_OP_SBBB: return compute_all_sbbb();
5531 case CC_OP_SBBW: return compute_all_sbbw();
5532 case CC_OP_SBBL: return compute_all_sbbl();
eaa728ee 5533
a7812ae4
PB
5534 case CC_OP_LOGICB: return compute_all_logicb();
5535 case CC_OP_LOGICW: return compute_all_logicw();
5536 case CC_OP_LOGICL: return compute_all_logicl();
eaa728ee 5537
a7812ae4
PB
5538 case CC_OP_INCB: return compute_all_incb();
5539 case CC_OP_INCW: return compute_all_incw();
5540 case CC_OP_INCL: return compute_all_incl();
eaa728ee 5541
a7812ae4
PB
5542 case CC_OP_DECB: return compute_all_decb();
5543 case CC_OP_DECW: return compute_all_decw();
5544 case CC_OP_DECL: return compute_all_decl();
eaa728ee 5545
a7812ae4
PB
5546 case CC_OP_SHLB: return compute_all_shlb();
5547 case CC_OP_SHLW: return compute_all_shlw();
5548 case CC_OP_SHLL: return compute_all_shll();
eaa728ee 5549
a7812ae4
PB
5550 case CC_OP_SARB: return compute_all_sarb();
5551 case CC_OP_SARW: return compute_all_sarw();
5552 case CC_OP_SARL: return compute_all_sarl();
eaa728ee
FB
5553
5554#ifdef TARGET_X86_64
a7812ae4 5555 case CC_OP_MULQ: return compute_all_mulq();
eaa728ee 5556
a7812ae4 5557 case CC_OP_ADDQ: return compute_all_addq();
eaa728ee 5558
a7812ae4 5559 case CC_OP_ADCQ: return compute_all_adcq();
eaa728ee 5560
a7812ae4 5561 case CC_OP_SUBQ: return compute_all_subq();
eaa728ee 5562
a7812ae4 5563 case CC_OP_SBBQ: return compute_all_sbbq();
eaa728ee 5564
a7812ae4 5565 case CC_OP_LOGICQ: return compute_all_logicq();
eaa728ee 5566
a7812ae4 5567 case CC_OP_INCQ: return compute_all_incq();
eaa728ee 5568
a7812ae4 5569 case CC_OP_DECQ: return compute_all_decq();
eaa728ee 5570
a7812ae4 5571 case CC_OP_SHLQ: return compute_all_shlq();
eaa728ee 5572
a7812ae4 5573 case CC_OP_SARQ: return compute_all_sarq();
eaa728ee 5574#endif
a7812ae4
PB
5575 }
5576}
5577
5578uint32_t helper_cc_compute_c(int op)
5579{
5580 switch (op) {
5581 default: /* should never happen */ return 0;
5582
5583 case CC_OP_EFLAGS: return compute_c_eflags();
5584
5585 case CC_OP_MULB: return compute_c_mull();
5586 case CC_OP_MULW: return compute_c_mull();
5587 case CC_OP_MULL: return compute_c_mull();
5588
5589 case CC_OP_ADDB: return compute_c_addb();
5590 case CC_OP_ADDW: return compute_c_addw();
5591 case CC_OP_ADDL: return compute_c_addl();
5592
5593 case CC_OP_ADCB: return compute_c_adcb();
5594 case CC_OP_ADCW: return compute_c_adcw();
5595 case CC_OP_ADCL: return compute_c_adcl();
5596
5597 case CC_OP_SUBB: return compute_c_subb();
5598 case CC_OP_SUBW: return compute_c_subw();
5599 case CC_OP_SUBL: return compute_c_subl();
5600
5601 case CC_OP_SBBB: return compute_c_sbbb();
5602 case CC_OP_SBBW: return compute_c_sbbw();
5603 case CC_OP_SBBL: return compute_c_sbbl();
5604
5605 case CC_OP_LOGICB: return compute_c_logicb();
5606 case CC_OP_LOGICW: return compute_c_logicw();
5607 case CC_OP_LOGICL: return compute_c_logicl();
5608
5609 case CC_OP_INCB: return compute_c_incl();
5610 case CC_OP_INCW: return compute_c_incl();
5611 case CC_OP_INCL: return compute_c_incl();
5612
5613 case CC_OP_DECB: return compute_c_incl();
5614 case CC_OP_DECW: return compute_c_incl();
5615 case CC_OP_DECL: return compute_c_incl();
eaa728ee 5616
a7812ae4
PB
5617 case CC_OP_SHLB: return compute_c_shlb();
5618 case CC_OP_SHLW: return compute_c_shlw();
5619 case CC_OP_SHLL: return compute_c_shll();
5620
5621 case CC_OP_SARB: return compute_c_sarl();
5622 case CC_OP_SARW: return compute_c_sarl();
5623 case CC_OP_SARL: return compute_c_sarl();
5624
5625#ifdef TARGET_X86_64
5626 case CC_OP_MULQ: return compute_c_mull();
5627
5628 case CC_OP_ADDQ: return compute_c_addq();
5629
5630 case CC_OP_ADCQ: return compute_c_adcq();
5631
5632 case CC_OP_SUBQ: return compute_c_subq();
5633
5634 case CC_OP_SBBQ: return compute_c_sbbq();
5635
5636 case CC_OP_LOGICQ: return compute_c_logicq();
5637
5638 case CC_OP_INCQ: return compute_c_incl();
5639
5640 case CC_OP_DECQ: return compute_c_incl();
5641
5642 case CC_OP_SHLQ: return compute_c_shlq();
5643
5644 case CC_OP_SARQ: return compute_c_sarl();
5645#endif
5646 }
5647}