/*
 *  i386 helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <math.h>
#include "exec.h"
#include "exec-all.h"
#include "host-utils.h"
#include "ioport.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif

#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

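/* Precomputed PF values: entry i is CC_P when byte i contains an even
   number of set bits, 0 otherwise. */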
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};

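/* RCL rotates through CF, so a w-bit rotate cycles over w + 1 bit
   positions; the shift count is therefore reduced modulo 17 (16-bit)
   or modulo 9 (8-bit) using the tables below. */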
/* modulo 17 table */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};

/* modulo 9 table */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};

#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)

/* broken thread support */

static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}

target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}

/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}

static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* reject execute-only (non-readable) code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the privilege rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

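/* Hardware task switch, following the sequence in the Intel/AMD
   manuals: validate the new TSS, save the current register state into
   the old TSS, update the TSS busy bits and the NT/back-link fields
   according to the switch source, then load the full register and
   segment state from the new TSS. */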
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses beforehand */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just the selectors, as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is within the CS segment limit */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}

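/* I/O permission checks: when CPL > IOPL (or in vm86 mode), IN/OUT and
   string I/O consult the TSS I/O permission bitmap; every bit covering
   the accessed port range must be clear for the access to be allowed. */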
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}

void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

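/* Exception vectors that push an error code: #DF(8), #TS(10), #NP(11),
   #SS(12), #GP(13), #PF(14) and #AC(17). */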
static int exception_has_error_code(int intno)
{
    switch(intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* On 64-bit machines, the segment base + stack pointer addition below
 * can overflow 32 bits, so this segment addition macro is used to trim
 * the value to 32 bits whenever needed. */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}

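/* Protected-mode interrupt/exception delivery: fetch and validate the
   IDT gate, check the gate DPL against CPL for software interrupts,
   possibly switch to an inner stack taken from the TSS, then push the
   return frame and transfer control through the gate.  Task gates
   instead branch into a full hardware task switch. */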
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}

#ifdef TARGET_X86_64

#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}

static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}

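/* 64-bit interrupt delivery always pushes a full SS:RSP frame through
   a 64-bit IDT gate; a non-zero IST field in the gate unconditionally
   selects an alternate stack pointer from the 64-bit TSS. */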
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exception_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF flag */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif

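/* SYSCALL: the target CS/SS selectors come from MSR_STAR[47:32].  In
   long mode the return RIP is saved in RCX and RFLAGS in R11, RFLAGS
   is masked with MSR_SFMASK (env->fmask) and RIP is loaded from LSTAR
   (or CSTAR for a compatibility-mode caller); in legacy mode ECX
   receives EIP and EIP is loaded from STAR[31:0]. */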
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

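/* SYSRET: return to CPL 3 using selectors derived from MSR_STAR[63:48].
   In long mode RFLAGS is restored from R11 and RIP from RCX (or ECX,
   depending on dflag); legacy mode just re-enables IF. */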
#ifdef TARGET_X86_64
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than exit
       the emulation with the suitable exception and error code */
    if (is_int)
        EIP = next_eip;
}

#else

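/* With SVM intercepts active, interrupts and exceptions delivered to
   the guest are also recorded in the VMCB event-injection field,
   unless an injection is already pending there. */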
static void handle_event_inj(int intno, int is_int, int error_code,
                             int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_event_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                              control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt(CPUState *env1)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}

void do_interrupt_x86_hardirq(CPUState *env1, int intno, int is_hw)
{
    CPUState *saved_env;

    saved_env = env;
    env = env1;
    do_interrupt_all(intno, 0, 0, 0, is_hw);
    env = saved_env;
}

/* This should come from sysemu.h - if we could include it here... */
void qemu_system_reset_request(void);

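/* Exception classification used below: #DE(0) and #TS/#NP/#SS/#GP
   (vectors 10-13) are "contributory".  A contributory exception raised
   while delivering a contributory one, or any of them raised while
   delivering #PF, escalates to #DF(8); an exception during #DF
   delivery is a triple fault, which resets the machine. */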
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}

/* shortcuts to generate exceptions */

void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}

void raise_exception_env(int exception_index, CPUState *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}

/* SMM support */

#if defined(CONFIG_USER_ONLY)

void do_smm_enter(CPUState *env1)
{
}

void helper_rsm(void)
{
}

#else

#ifdef TARGET_X86_64
#define SMM_REVISION_ID 0x00020064
#else
#define SMM_REVISION_ID 0x00020000
#endif

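/* SMI entry: save the entire CPU state into the SMRAM save area at
   smbase + 0x8000, then enter a flat, real-mode-like environment with
   CS.base = smbase and EIP = 0x8000. */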
void do_smm_enter(CPUState *env1)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;
    CPUState *saved_env;

    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
                                      CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}

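/* RSM: restore the CPU state from the SMRAM save area and leave SMM.
   SMBASE itself is only reloaded when the save-area revision ID
   advertises SMBASE relocation (bit 17). */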
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}

#endif /* !CONFIG_USER_ONLY */

/* division, flags are undefined */

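/* x86 DIV/IDIV raise #DE both for a zero divisor and when the quotient
   does not fit in the destination register, hence the extra range
   checks on q below. */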
void helper_divb_AL(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff);
    den = (t0 & 0xff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_idivb_AL(target_ulong t0)
{
    int num, den, q, r;

    num = (int16_t)EAX;
    den = (int8_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int8_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xff;
    r = (num % den) & 0xff;
    EAX = (EAX & ~0xffff) | (r << 8) | q;
}

void helper_divw_AX(target_ulong t0)
{
    unsigned int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (t0 & 0xffff);
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q > 0xffff)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_idivw_AX(target_ulong t0)
{
    int num, den, q, r;

    num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}

void helper_divl_EAX(target_ulong t0)
{
    unsigned int den, r;
    uint64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q > 0xffffffff)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}

/* bcd */

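/* ASCII/decimal adjust helpers.  AAM/AAD take the immediate base
   operand (normally 10); AAM with a base of 0 should raise #DE, which
   is not implemented here yet (see the XXX below). */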
1789/* XXX: exception */
1790void helper_aam(int base)
1791{
1792 int al, ah;
1793 al = EAX & 0xff;
1794 ah = al / base;
1795 al = al % base;
1796 EAX = (EAX & ~0xffff) | al | (ah << 8);
1797 CC_DST = al;
1798}
1799
1800void helper_aad(int base)
1801{
1802 int al, ah;
1803 al = EAX & 0xff;
1804 ah = (EAX >> 8) & 0xff;
1805 al = ((ah * base) + al) & 0xff;
1806 EAX = (EAX & ~0xffff) | al;
1807 CC_DST = al;
1808}
1809
1810void helper_aaa(void)
1811{
1812 int icarry;
1813 int al, ah, af;
1814 int eflags;
1815
a7812ae4 1816 eflags = helper_cc_compute_all(CC_OP);
eaa728ee
FB
1817 af = eflags & CC_A;
1818 al = EAX & 0xff;
1819 ah = (EAX >> 8) & 0xff;
1820
1821 icarry = (al > 0xf9);
1822 if (((al & 0x0f) > 9 ) || af) {
1823 al = (al + 6) & 0x0f;
1824 ah = (ah + 1 + icarry) & 0xff;
1825 eflags |= CC_C | CC_A;
1826 } else {
1827 eflags &= ~(CC_C | CC_A);
1828 al &= 0x0f;
1829 }
1830 EAX = (EAX & ~0xffff) | al | (ah << 8);
1831 CC_SRC = eflags;
eaa728ee
FB
1832}
1833
1834void helper_aas(void)
1835{
1836 int icarry;
1837 int al, ah, af;
1838 int eflags;
1839
1840    eflags = helper_cc_compute_all(CC_OP);
1841 af = eflags & CC_A;
1842 al = EAX & 0xff;
1843 ah = (EAX >> 8) & 0xff;
1844
1845 icarry = (al < 6);
1846 if (((al & 0x0f) > 9 ) || af) {
1847 al = (al - 6) & 0x0f;
1848 ah = (ah - 1 - icarry) & 0xff;
1849 eflags |= CC_C | CC_A;
1850 } else {
1851 eflags &= ~(CC_C | CC_A);
1852 al &= 0x0f;
1853 }
1854 EAX = (EAX & ~0xffff) | al | (ah << 8);
1855 CC_SRC = eflags;
1856}
1857
1858void helper_daa(void)
1859{
1860 int al, af, cf;
1861 int eflags;
1862
1863    eflags = helper_cc_compute_all(CC_OP);
1864 cf = eflags & CC_C;
1865 af = eflags & CC_A;
1866 al = EAX & 0xff;
1867
1868 eflags = 0;
1869 if (((al & 0x0f) > 9 ) || af) {
1870 al = (al + 6) & 0xff;
1871 eflags |= CC_A;
1872 }
1873 if ((al > 0x9f) || cf) {
1874 al = (al + 0x60) & 0xff;
1875 eflags |= CC_C;
1876 }
1877 EAX = (EAX & ~0xff) | al;
1878 /* well, speed is not an issue here, so we compute the flags by hand */
1879 eflags |= (al == 0) << 6; /* zf */
1880 eflags |= parity_table[al]; /* pf */
1881 eflags |= (al & 0x80); /* sf */
1882 CC_SRC = eflags;
1883}
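/* Worked example (illustrative): after adding packed BCD 0x19 + 0x15
   the binary sum is AL = 0x2e; DAA sees (AL & 0x0f) > 9, adds 6 to
   give AL = 0x34, the correct BCD result 34, and sets AF. */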
1884
1885void helper_das(void)
1886{
1887 int al, al1, af, cf;
1888 int eflags;
1889
1890    eflags = helper_cc_compute_all(CC_OP);
1891 cf = eflags & CC_C;
1892 af = eflags & CC_A;
1893 al = EAX & 0xff;
1894
1895 eflags = 0;
1896 al1 = al;
1897 if (((al & 0x0f) > 9 ) || af) {
1898 eflags |= CC_A;
1899 if (al < 6 || cf)
1900 eflags |= CC_C;
1901 al = (al - 6) & 0xff;
1902 }
1903 if ((al1 > 0x99) || cf) {
1904 al = (al - 0x60) & 0xff;
1905 eflags |= CC_C;
1906 }
1907 EAX = (EAX & ~0xff) | al;
1908 /* well, speed is not an issue here, so we compute the flags by hand */
1909 eflags |= (al == 0) << 6; /* zf */
1910 eflags |= parity_table[al]; /* pf */
1911 eflags |= (al & 0x80); /* sf */
1912 CC_SRC = eflags;
1913}
1914
1915void helper_into(int next_eip_addend)
1916{
1917 int eflags;
1918    eflags = helper_cc_compute_all(CC_OP);
1919 if (eflags & CC_O) {
1920 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1921 }
1922}
1923
1924void helper_cmpxchg8b(target_ulong a0)
1925{
1926 uint64_t d;
1927 int eflags;
1928
1929    eflags = helper_cc_compute_all(CC_OP);
1930 d = ldq(a0);
1931 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1932 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1933 eflags |= CC_Z;
1934 } else {
1935 /* always do the store */
1936 stq(a0, d);
1937 EDX = (uint32_t)(d >> 32);
1938 EAX = (uint32_t)d;
1939 eflags &= ~CC_Z;
1940 }
1941 CC_SRC = eflags;
1942}
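/* Note: the store on the failure path is deliberate; CMPXCHG8B always
   performs a write to the m64 operand (rewriting the old value on
   mismatch), so page faults and write-access behaviour match a real
   CPU even when the compare fails. */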
1943
1944#ifdef TARGET_X86_64
1945void helper_cmpxchg16b(target_ulong a0)
1946{
1947 uint64_t d0, d1;
1948 int eflags;
1949
1950 if ((a0 & 0xf) != 0)
1951 raise_exception(EXCP0D_GPF);
1952    eflags = helper_cc_compute_all(CC_OP);
1953 d0 = ldq(a0);
1954 d1 = ldq(a0 + 8);
1955 if (d0 == EAX && d1 == EDX) {
1956 stq(a0, EBX);
1957 stq(a0 + 8, ECX);
1958 eflags |= CC_Z;
1959 } else {
1960 /* always do the store */
1961 stq(a0, d0);
1962 stq(a0 + 8, d1);
1963 EDX = d1;
1964 EAX = d0;
1965 eflags &= ~CC_Z;
1966 }
1967 CC_SRC = eflags;
1968}
1969#endif
1970
1971void helper_single_step(void)
1972{
1973#ifndef CONFIG_USER_ONLY
1974 check_hw_breakpoints(env, 1);
1975 env->dr[6] |= DR6_BS;
1976#endif
1977 raise_exception(EXCP01_DB);
1978}
1979
1980void helper_cpuid(void)
1981{
1982    uint32_t eax, ebx, ecx, edx;
1983
1984    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1985
1986    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1987 EAX = eax;
1988 EBX = ebx;
1989 ECX = ecx;
1990 EDX = edx;
1991}
1992
1993void helper_enter_level(int level, int data32, target_ulong t1)
1994{
1995 target_ulong ssp;
1996 uint32_t esp_mask, esp, ebp;
1997
1998 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1999 ssp = env->segs[R_SS].base;
2000 ebp = EBP;
2001 esp = ESP;
2002 if (data32) {
2003 /* 32 bit */
2004 esp -= 4;
2005 while (--level) {
2006 esp -= 4;
2007 ebp -= 4;
2008 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2009 }
2010 esp -= 4;
2011 stl(ssp + (esp & esp_mask), t1);
2012 } else {
2013 /* 16 bit */
2014 esp -= 2;
2015 while (--level) {
2016 esp -= 2;
2017 ebp -= 2;
2018 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2019 }
2020 esp -= 2;
2021 stw(ssp + (esp & esp_mask), t1);
2022 }
2023}
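/* Sketch of the 32-bit path for level = 2: one display slot is
   reserved first, the loop then copies level - 1 saved frame pointers
   from the old frame (walking EBP downwards), and t1, the new frame
   pointer value supplied by the caller, is stored below them, keeping
   the display chain of nested procedures intact. */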
2024
2025#ifdef TARGET_X86_64
2026void helper_enter64_level(int level, int data64, target_ulong t1)
2027{
2028 target_ulong esp, ebp;
2029 ebp = EBP;
2030 esp = ESP;
2031
2032 if (data64) {
2033 /* 64 bit */
2034 esp -= 8;
2035 while (--level) {
2036 esp -= 8;
2037 ebp -= 8;
2038 stq(esp, ldq(ebp));
2039 }
2040 esp -= 8;
2041 stq(esp, t1);
2042 } else {
2043 /* 16 bit */
2044 esp -= 2;
2045 while (--level) {
2046 esp -= 2;
2047 ebp -= 2;
2048 stw(esp, lduw(ebp));
2049 }
2050 esp -= 2;
2051 stw(esp, t1);
2052 }
2053}
2054#endif
2055
2056void helper_lldt(int selector)
2057{
2058 SegmentCache *dt;
2059 uint32_t e1, e2;
2060 int index, entry_limit;
2061 target_ulong ptr;
2062
2063 selector &= 0xffff;
2064 if ((selector & 0xfffc) == 0) {
2065 /* XXX: NULL selector case: invalid LDT */
2066 env->ldt.base = 0;
2067 env->ldt.limit = 0;
2068 } else {
2069 if (selector & 0x4)
2070 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2071 dt = &env->gdt;
2072 index = selector & ~7;
2073#ifdef TARGET_X86_64
2074 if (env->hflags & HF_LMA_MASK)
2075 entry_limit = 15;
2076 else
2077#endif
2078 entry_limit = 7;
2079 if ((index + entry_limit) > dt->limit)
2080 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2081 ptr = dt->base + index;
2082 e1 = ldl_kernel(ptr);
2083 e2 = ldl_kernel(ptr + 4);
2084 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2085 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2086 if (!(e2 & DESC_P_MASK))
2087 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2088#ifdef TARGET_X86_64
2089 if (env->hflags & HF_LMA_MASK) {
2090 uint32_t e3;
2091 e3 = ldl_kernel(ptr + 8);
2092 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2093 env->ldt.base |= (target_ulong)e3 << 32;
2094 } else
2095#endif
2096 {
2097 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2098 }
2099 }
2100 env->ldt.selector = selector;
2101}
2102
2103void helper_ltr(int selector)
2104{
2105 SegmentCache *dt;
2106 uint32_t e1, e2;
2107 int index, type, entry_limit;
2108 target_ulong ptr;
2109
2110 selector &= 0xffff;
2111 if ((selector & 0xfffc) == 0) {
2112 /* NULL selector case: invalid TR */
2113 env->tr.base = 0;
2114 env->tr.limit = 0;
2115 env->tr.flags = 0;
2116 } else {
2117 if (selector & 0x4)
2118 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2119 dt = &env->gdt;
2120 index = selector & ~7;
2121#ifdef TARGET_X86_64
2122 if (env->hflags & HF_LMA_MASK)
2123 entry_limit = 15;
2124 else
2125#endif
2126 entry_limit = 7;
2127 if ((index + entry_limit) > dt->limit)
2128 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2129 ptr = dt->base + index;
2130 e1 = ldl_kernel(ptr);
2131 e2 = ldl_kernel(ptr + 4);
2132 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2133 if ((e2 & DESC_S_MASK) ||
2134 (type != 1 && type != 9))
2135 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2136 if (!(e2 & DESC_P_MASK))
2137 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2138#ifdef TARGET_X86_64
2139 if (env->hflags & HF_LMA_MASK) {
2140 uint32_t e3, e4;
2141 e3 = ldl_kernel(ptr + 8);
2142 e4 = ldl_kernel(ptr + 12);
2143 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2144 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2145 load_seg_cache_raw_dt(&env->tr, e1, e2);
2146 env->tr.base |= (target_ulong)e3 << 32;
2147 } else
2148#endif
2149 {
2150 load_seg_cache_raw_dt(&env->tr, e1, e2);
2151 }
2152 e2 |= DESC_TSS_BUSY_MASK;
2153 stl_kernel(ptr + 4, e2);
2154 }
2155 env->tr.selector = selector;
2156}
2157
2158/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2159void helper_load_seg(int seg_reg, int selector)
2160{
2161 uint32_t e1, e2;
2162 int cpl, dpl, rpl;
2163 SegmentCache *dt;
2164 int index;
2165 target_ulong ptr;
2166
2167 selector &= 0xffff;
2168 cpl = env->hflags & HF_CPL_MASK;
2169 if ((selector & 0xfffc) == 0) {
2170 /* null selector case */
2171 if (seg_reg == R_SS
2172#ifdef TARGET_X86_64
2173 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2174#endif
2175 )
2176 raise_exception_err(EXCP0D_GPF, 0);
2177 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2178 } else {
2179
2180 if (selector & 0x4)
2181 dt = &env->ldt;
2182 else
2183 dt = &env->gdt;
2184 index = selector & ~7;
2185 if ((index + 7) > dt->limit)
2186 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2187 ptr = dt->base + index;
2188 e1 = ldl_kernel(ptr);
2189 e2 = ldl_kernel(ptr + 4);
2190
2191 if (!(e2 & DESC_S_MASK))
2192 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2193 rpl = selector & 3;
2194 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2195 if (seg_reg == R_SS) {
2196 /* must be writable segment */
2197 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2198 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2199 if (rpl != cpl || dpl != cpl)
2200 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2201 } else {
2202 /* must be readable segment */
2203 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2204 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2205
2206 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2207 /* if not conforming code, test rights */
2208 if (dpl < cpl || dpl < rpl)
2209 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2210 }
2211 }
2212
2213 if (!(e2 & DESC_P_MASK)) {
2214 if (seg_reg == R_SS)
2215 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2216 else
2217 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2218 }
2219
2220 /* set the access bit if not already set */
2221 if (!(e2 & DESC_A_MASK)) {
2222 e2 |= DESC_A_MASK;
2223 stl_kernel(ptr + 4, e2);
2224 }
2225
2226 cpu_x86_load_seg_cache(env, seg_reg, selector,
2227 get_seg_base(e1, e2),
2228 get_seg_limit(e1, e2),
2229 e2);
2230#if 0
2231        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2232 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2233#endif
2234 }
2235}
2236
2237/* protected mode jump */
2238void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2239 int next_eip_addend)
2240{
2241 int gate_cs, type;
2242 uint32_t e1, e2, cpl, dpl, rpl, limit;
2243 target_ulong next_eip;
2244
2245 if ((new_cs & 0xfffc) == 0)
2246 raise_exception_err(EXCP0D_GPF, 0);
2247 if (load_segment(&e1, &e2, new_cs) != 0)
2248 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2249 cpl = env->hflags & HF_CPL_MASK;
2250 if (e2 & DESC_S_MASK) {
2251 if (!(e2 & DESC_CS_MASK))
2252 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2253 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2254 if (e2 & DESC_C_MASK) {
2255 /* conforming code segment */
2256 if (dpl > cpl)
2257 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2258 } else {
2259 /* non conforming code segment */
2260 rpl = new_cs & 3;
2261 if (rpl > cpl)
2262 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2263 if (dpl != cpl)
2264 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2265 }
2266 if (!(e2 & DESC_P_MASK))
2267 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2268 limit = get_seg_limit(e1, e2);
2269 if (new_eip > limit &&
2270 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2271 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2272 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2273 get_seg_base(e1, e2), limit, e2);
2274 EIP = new_eip;
2275 } else {
2276 /* jump to call or task gate */
2277 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2278 rpl = new_cs & 3;
2279 cpl = env->hflags & HF_CPL_MASK;
2280 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2281 switch(type) {
2282 case 1: /* 286 TSS */
2283 case 9: /* 386 TSS */
2284 case 5: /* task gate */
2285 if (dpl < cpl || dpl < rpl)
2286 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2287 next_eip = env->eip + next_eip_addend;
2288 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2289 CC_OP = CC_OP_EFLAGS;
2290 break;
2291 case 4: /* 286 call gate */
2292 case 12: /* 386 call gate */
2293 if ((dpl < cpl) || (dpl < rpl))
2294 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2295 if (!(e2 & DESC_P_MASK))
2296 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2297 gate_cs = e1 >> 16;
2298 new_eip = (e1 & 0xffff);
2299 if (type == 12)
2300 new_eip |= (e2 & 0xffff0000);
2301 if (load_segment(&e1, &e2, gate_cs) != 0)
2302 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2303 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2304 /* must be code segment */
2305 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2306 (DESC_S_MASK | DESC_CS_MASK)))
2307 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2308 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2309 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2310 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2311 if (!(e2 & DESC_P_MASK))
2312 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2313 limit = get_seg_limit(e1, e2);
2314 if (new_eip > limit)
2315 raise_exception_err(EXCP0D_GPF, 0);
2316 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2317 get_seg_base(e1, e2), limit, e2);
2318 EIP = new_eip;
2319 break;
2320 default:
2321 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2322 break;
2323 }
2324 }
2325}
2326
2327/* real mode call */
2328void helper_lcall_real(int new_cs, target_ulong new_eip1,
2329 int shift, int next_eip)
2330{
2331 int new_eip;
2332 uint32_t esp, esp_mask;
2333 target_ulong ssp;
2334
2335 new_eip = new_eip1;
2336 esp = ESP;
2337 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2338 ssp = env->segs[R_SS].base;
2339 if (shift) {
2340 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2341 PUSHL(ssp, esp, esp_mask, next_eip);
2342 } else {
2343 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2344 PUSHW(ssp, esp, esp_mask, next_eip);
2345 }
2346
2347 SET_ESP(esp, esp_mask);
2348 env->eip = new_eip;
2349 env->segs[R_CS].selector = new_cs;
2350 env->segs[R_CS].base = (new_cs << 4);
2351}
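/* Illustrative: a 16-bit far call to 0x1234:0x5678 pushes the old CS
   and next_eip, then loads CS.base = 0x1234 << 4 = 0x12340, the plain
   real-mode segment-to-linear rule; no descriptor checks apply here. */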
2352
2353/* protected mode call */
2354void helper_lcall_protected(int new_cs, target_ulong new_eip,
2355 int shift, int next_eip_addend)
2356{
2357 int new_stack, i;
2358 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2359    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2360 uint32_t val, limit, old_sp_mask;
2361 target_ulong ssp, old_ssp, next_eip;
2362
2363 next_eip = env->eip + next_eip_addend;
2364 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2365 LOG_PCALL_STATE(env);
2366 if ((new_cs & 0xfffc) == 0)
2367 raise_exception_err(EXCP0D_GPF, 0);
2368 if (load_segment(&e1, &e2, new_cs) != 0)
2369 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2370 cpl = env->hflags & HF_CPL_MASK;
2371    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2372 if (e2 & DESC_S_MASK) {
2373 if (!(e2 & DESC_CS_MASK))
2374 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2375 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2376 if (e2 & DESC_C_MASK) {
2377 /* conforming code segment */
2378 if (dpl > cpl)
2379 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2380 } else {
2381 /* non conforming code segment */
2382 rpl = new_cs & 3;
2383 if (rpl > cpl)
2384 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2385 if (dpl != cpl)
2386 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2387 }
2388 if (!(e2 & DESC_P_MASK))
2389 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2390
2391#ifdef TARGET_X86_64
2392 /* XXX: check 16/32 bit cases in long mode */
2393 if (shift == 2) {
2394 target_ulong rsp;
2395 /* 64 bit case */
2396 rsp = ESP;
2397 PUSHQ(rsp, env->segs[R_CS].selector);
2398 PUSHQ(rsp, next_eip);
2399 /* from this point, not restartable */
2400 ESP = rsp;
2401 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2402 get_seg_base(e1, e2),
2403 get_seg_limit(e1, e2), e2);
2404 EIP = new_eip;
2405 } else
2406#endif
2407 {
2408 sp = ESP;
2409 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2410 ssp = env->segs[R_SS].base;
2411 if (shift) {
2412 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2413 PUSHL(ssp, sp, sp_mask, next_eip);
2414 } else {
2415 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2416 PUSHW(ssp, sp, sp_mask, next_eip);
2417 }
2418
2419 limit = get_seg_limit(e1, e2);
2420 if (new_eip > limit)
2421 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2422 /* from this point, not restartable */
2423 SET_ESP(sp, sp_mask);
2424 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2425 get_seg_base(e1, e2), limit, e2);
2426 EIP = new_eip;
2427 }
2428 } else {
2429 /* check gate type */
2430 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2431 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2432 rpl = new_cs & 3;
2433 switch(type) {
2434 case 1: /* available 286 TSS */
2435 case 9: /* available 386 TSS */
2436 case 5: /* task gate */
2437 if (dpl < cpl || dpl < rpl)
2438 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2439 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2440 CC_OP = CC_OP_EFLAGS;
2441 return;
2442 case 4: /* 286 call gate */
2443 case 12: /* 386 call gate */
2444 break;
2445 default:
2446 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2447 break;
2448 }
2449 shift = type >> 3;
2450
2451 if (dpl < cpl || dpl < rpl)
2452 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2453 /* check valid bit */
2454 if (!(e2 & DESC_P_MASK))
2455 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2456 selector = e1 >> 16;
2457 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2458 param_count = e2 & 0x1f;
2459 if ((selector & 0xfffc) == 0)
2460 raise_exception_err(EXCP0D_GPF, 0);
2461
2462 if (load_segment(&e1, &e2, selector) != 0)
2463 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2464 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2465 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2466 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2467 if (dpl > cpl)
2468 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2469 if (!(e2 & DESC_P_MASK))
2470 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2471
2472 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2473 /* to inner privilege */
2474 get_ss_esp_from_tss(&ss, &sp, dpl);
2475            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2476                      ss, sp, param_count, ESP);
2477 if ((ss & 0xfffc) == 0)
2478 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2479 if ((ss & 3) != dpl)
2480 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2481 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2482 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2483 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2484 if (ss_dpl != dpl)
2485 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2486 if (!(ss_e2 & DESC_S_MASK) ||
2487 (ss_e2 & DESC_CS_MASK) ||
2488 !(ss_e2 & DESC_W_MASK))
2489 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2490 if (!(ss_e2 & DESC_P_MASK))
2491 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2492
2493 // push_size = ((param_count * 2) + 8) << shift;
2494
2495 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2496 old_ssp = env->segs[R_SS].base;
2497
2498 sp_mask = get_sp_mask(ss_e2);
2499 ssp = get_seg_base(ss_e1, ss_e2);
2500 if (shift) {
2501 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2502 PUSHL(ssp, sp, sp_mask, ESP);
2503 for(i = param_count - 1; i >= 0; i--) {
2504 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2505 PUSHL(ssp, sp, sp_mask, val);
2506 }
2507 } else {
2508 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2509 PUSHW(ssp, sp, sp_mask, ESP);
2510 for(i = param_count - 1; i >= 0; i--) {
2511 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2512 PUSHW(ssp, sp, sp_mask, val);
2513 }
2514 }
2515 new_stack = 1;
2516 } else {
2517 /* to same privilege */
2518 sp = ESP;
2519 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2520 ssp = env->segs[R_SS].base;
2521 // push_size = (4 << shift);
2522 new_stack = 0;
2523 }
2524
2525 if (shift) {
2526 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2527 PUSHL(ssp, sp, sp_mask, next_eip);
2528 } else {
2529 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2530 PUSHW(ssp, sp, sp_mask, next_eip);
2531 }
2532
2533 /* from this point, not restartable */
2534
2535 if (new_stack) {
2536 ss = (ss & ~3) | dpl;
2537 cpu_x86_load_seg_cache(env, R_SS, ss,
2538 ssp,
2539 get_seg_limit(ss_e1, ss_e2),
2540 ss_e2);
2541 }
2542
2543 selector = (selector & ~3) | dpl;
2544 cpu_x86_load_seg_cache(env, R_CS, selector,
2545 get_seg_base(e1, e2),
2546 get_seg_limit(e1, e2),
2547 e2);
2548 cpu_x86_set_cpl(env, dpl);
2549 SET_ESP(sp, sp_mask);
2550 EIP = offset;
2551 }
2552}
2553
2554/* real and vm86 mode iret */
2555void helper_iret_real(int shift)
2556{
2557 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2558 target_ulong ssp;
2559 int eflags_mask;
2560
2561 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2562 sp = ESP;
2563 ssp = env->segs[R_SS].base;
2564 if (shift == 1) {
2565 /* 32 bits */
2566 POPL(ssp, sp, sp_mask, new_eip);
2567 POPL(ssp, sp, sp_mask, new_cs);
2568 new_cs &= 0xffff;
2569 POPL(ssp, sp, sp_mask, new_eflags);
2570 } else {
2571 /* 16 bits */
2572 POPW(ssp, sp, sp_mask, new_eip);
2573 POPW(ssp, sp, sp_mask, new_cs);
2574 POPW(ssp, sp, sp_mask, new_eflags);
2575 }
2576 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2577    env->segs[R_CS].selector = new_cs;
2578    env->segs[R_CS].base = (new_cs << 4);
2579 env->eip = new_eip;
2580 if (env->eflags & VM_MASK)
2581 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2582 else
2583 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2584 if (shift == 0)
2585 eflags_mask &= 0xffff;
2586 load_eflags(new_eflags, eflags_mask);
2587    env->hflags2 &= ~HF2_NMI_MASK;
2588}
2589
2590static inline void validate_seg(int seg_reg, int cpl)
2591{
2592 int dpl;
2593 uint32_t e2;
2594
2595 /* XXX: on x86_64, we do not want to nullify FS and GS because
2596 they may still contain a valid base. I would be interested to
2597 know how a real x86_64 CPU behaves */
2598 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2599 (env->segs[seg_reg].selector & 0xfffc) == 0)
2600 return;
2601
2602 e2 = env->segs[seg_reg].flags;
2603 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2604 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2605 /* data or non conforming code segment */
2606 if (dpl < cpl) {
2607 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2608 }
2609 }
2610}
2611
2612/* protected mode iret */
2613static inline void helper_ret_protected(int shift, int is_iret, int addend)
2614{
2615 uint32_t new_cs, new_eflags, new_ss;
2616 uint32_t new_es, new_ds, new_fs, new_gs;
2617 uint32_t e1, e2, ss_e1, ss_e2;
2618 int cpl, dpl, rpl, eflags_mask, iopl;
2619 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2620
2621#ifdef TARGET_X86_64
2622 if (shift == 2)
2623 sp_mask = -1;
2624 else
2625#endif
2626 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2627 sp = ESP;
2628 ssp = env->segs[R_SS].base;
2629 new_eflags = 0; /* avoid warning */
2630#ifdef TARGET_X86_64
2631 if (shift == 2) {
2632 POPQ(sp, new_eip);
2633 POPQ(sp, new_cs);
2634 new_cs &= 0xffff;
2635 if (is_iret) {
2636 POPQ(sp, new_eflags);
2637 }
2638 } else
2639#endif
2640 if (shift == 1) {
2641 /* 32 bits */
2642 POPL(ssp, sp, sp_mask, new_eip);
2643 POPL(ssp, sp, sp_mask, new_cs);
2644 new_cs &= 0xffff;
2645 if (is_iret) {
2646 POPL(ssp, sp, sp_mask, new_eflags);
2647 if (new_eflags & VM_MASK)
2648 goto return_to_vm86;
2649 }
2650 } else {
2651 /* 16 bits */
2652 POPW(ssp, sp, sp_mask, new_eip);
2653 POPW(ssp, sp, sp_mask, new_cs);
2654 if (is_iret)
2655 POPW(ssp, sp, sp_mask, new_eflags);
2656 }
2657 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2658 new_cs, new_eip, shift, addend);
2659 LOG_PCALL_STATE(env);
2660 if ((new_cs & 0xfffc) == 0)
2661 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2662 if (load_segment(&e1, &e2, new_cs) != 0)
2663 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2664 if (!(e2 & DESC_S_MASK) ||
2665 !(e2 & DESC_CS_MASK))
2666 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2667 cpl = env->hflags & HF_CPL_MASK;
2668 rpl = new_cs & 3;
2669 if (rpl < cpl)
2670 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2671 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2672 if (e2 & DESC_C_MASK) {
2673 if (dpl > rpl)
2674 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2675 } else {
2676 if (dpl != rpl)
2677 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2678 }
2679 if (!(e2 & DESC_P_MASK))
2680 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2681
2682 sp += addend;
2683 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2684 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2685        /* return to same privilege level */
2686 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2687 get_seg_base(e1, e2),
2688 get_seg_limit(e1, e2),
2689 e2);
2690 } else {
2691 /* return to different privilege level */
2692#ifdef TARGET_X86_64
2693 if (shift == 2) {
2694 POPQ(sp, new_esp);
2695 POPQ(sp, new_ss);
2696 new_ss &= 0xffff;
2697 } else
2698#endif
2699 if (shift == 1) {
2700 /* 32 bits */
2701 POPL(ssp, sp, sp_mask, new_esp);
2702 POPL(ssp, sp, sp_mask, new_ss);
2703 new_ss &= 0xffff;
2704 } else {
2705 /* 16 bits */
2706 POPW(ssp, sp, sp_mask, new_esp);
2707 POPW(ssp, sp, sp_mask, new_ss);
2708 }
2709        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
2710                  new_ss, new_esp);
2711 if ((new_ss & 0xfffc) == 0) {
2712#ifdef TARGET_X86_64
2713            /* NULL ss is allowed in long mode if cpl != 3 */
2714 /* XXX: test CS64 ? */
2715 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2716 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2717 0, 0xffffffff,
2718 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2719 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2720 DESC_W_MASK | DESC_A_MASK);
2721 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2722 } else
2723#endif
2724 {
2725 raise_exception_err(EXCP0D_GPF, 0);
2726 }
2727 } else {
2728 if ((new_ss & 3) != rpl)
2729 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2730 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2731 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2732 if (!(ss_e2 & DESC_S_MASK) ||
2733 (ss_e2 & DESC_CS_MASK) ||
2734 !(ss_e2 & DESC_W_MASK))
2735 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2736 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2737 if (dpl != rpl)
2738 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2739 if (!(ss_e2 & DESC_P_MASK))
2740 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2741 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2742 get_seg_base(ss_e1, ss_e2),
2743 get_seg_limit(ss_e1, ss_e2),
2744 ss_e2);
2745 }
2746
2747 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2748 get_seg_base(e1, e2),
2749 get_seg_limit(e1, e2),
2750 e2);
2751 cpu_x86_set_cpl(env, rpl);
2752 sp = new_esp;
2753#ifdef TARGET_X86_64
2754 if (env->hflags & HF_CS64_MASK)
2755 sp_mask = -1;
2756 else
2757#endif
2758 sp_mask = get_sp_mask(ss_e2);
2759
2760 /* validate data segments */
2761 validate_seg(R_ES, rpl);
2762 validate_seg(R_DS, rpl);
2763 validate_seg(R_FS, rpl);
2764 validate_seg(R_GS, rpl);
2765
2766 sp += addend;
2767 }
2768 SET_ESP(sp, sp_mask);
2769 env->eip = new_eip;
2770 if (is_iret) {
2771 /* NOTE: 'cpl' is the _old_ CPL */
2772 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2773 if (cpl == 0)
2774 eflags_mask |= IOPL_MASK;
2775 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2776 if (cpl <= iopl)
2777 eflags_mask |= IF_MASK;
2778 if (shift == 0)
2779 eflags_mask &= 0xffff;
2780 load_eflags(new_eflags, eflags_mask);
2781 }
2782 return;
2783
2784 return_to_vm86:
2785 POPL(ssp, sp, sp_mask, new_esp);
2786 POPL(ssp, sp, sp_mask, new_ss);
2787 POPL(ssp, sp, sp_mask, new_es);
2788 POPL(ssp, sp, sp_mask, new_ds);
2789 POPL(ssp, sp, sp_mask, new_fs);
2790 POPL(ssp, sp, sp_mask, new_gs);
2791
2792 /* modify processor state */
2793 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2794 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2795 load_seg_vm(R_CS, new_cs & 0xffff);
2796 cpu_x86_set_cpl(env, 3);
2797 load_seg_vm(R_SS, new_ss & 0xffff);
2798 load_seg_vm(R_ES, new_es & 0xffff);
2799 load_seg_vm(R_DS, new_ds & 0xffff);
2800 load_seg_vm(R_FS, new_fs & 0xffff);
2801 load_seg_vm(R_GS, new_gs & 0xffff);
2802
2803 env->eip = new_eip & 0xffff;
2804 ESP = new_esp;
2805}
2806
2807void helper_iret_protected(int shift, int next_eip)
2808{
2809 int tss_selector, type;
2810 uint32_t e1, e2;
2811
2812 /* specific case for TSS */
2813 if (env->eflags & NT_MASK) {
2814#ifdef TARGET_X86_64
2815 if (env->hflags & HF_LMA_MASK)
2816 raise_exception_err(EXCP0D_GPF, 0);
2817#endif
2818 tss_selector = lduw_kernel(env->tr.base + 0);
2819 if (tss_selector & 4)
2820 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2821 if (load_segment(&e1, &e2, tss_selector) != 0)
2822 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2823 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2824 /* NOTE: we check both segment and busy TSS */
2825 if (type != 3)
2826 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2827 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2828 } else {
2829 helper_ret_protected(shift, 1, 0);
2830 }
2831    env->hflags2 &= ~HF2_NMI_MASK;
2832}
2833
2834void helper_lret_protected(int shift, int addend)
2835{
2836 helper_ret_protected(shift, 0, addend);
2837}
2838
2839void helper_sysenter(void)
2840{
2841 if (env->sysenter_cs == 0) {
2842 raise_exception_err(EXCP0D_GPF, 0);
2843 }
2844 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2845 cpu_x86_set_cpl(env, 0);
2846
2847#ifdef TARGET_X86_64
2848 if (env->hflags & HF_LMA_MASK) {
2849 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2850 0, 0xffffffff,
2851 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2852 DESC_S_MASK |
2853 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2854 } else
2855#endif
2856 {
2857 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2858 0, 0xffffffff,
2859 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2860 DESC_S_MASK |
2861 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2862 }
2863 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2864 0, 0xffffffff,
2865 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2866 DESC_S_MASK |
2867 DESC_W_MASK | DESC_A_MASK);
2868 ESP = env->sysenter_esp;
2869 EIP = env->sysenter_eip;
2870}
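/* Note the fixed selector layout around IA32_SYSENTER_CS: CS comes
   from the MSR itself and SS is CS + 8; the SYSEXIT targets below use
   CS + 16/+ 24 (legacy) or CS + 32/+ 40 (64-bit), per the Intel
   fast-system-call convention. */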
2871
2872void helper_sysexit(int dflag)
2873{
2874 int cpl;
2875
2876 cpl = env->hflags & HF_CPL_MASK;
2877 if (env->sysenter_cs == 0 || cpl != 0) {
2878 raise_exception_err(EXCP0D_GPF, 0);
2879 }
2880 cpu_x86_set_cpl(env, 3);
2881#ifdef TARGET_X86_64
2882 if (dflag == 2) {
2883 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
2884 0, 0xffffffff,
2885 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2886 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2887 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
2888 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
2889 0, 0xffffffff,
2890 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2891 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2892 DESC_W_MASK | DESC_A_MASK);
2893 } else
2894#endif
2895 {
2896 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2897 0, 0xffffffff,
2898 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2899 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2900 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2901 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2902 0, 0xffffffff,
2903 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2904 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2905 DESC_W_MASK | DESC_A_MASK);
2906 }
2907 ESP = ECX;
2908 EIP = EDX;
2909}
2910
2911#if defined(CONFIG_USER_ONLY)
2912target_ulong helper_read_crN(int reg)
2913{
2914 return 0;
2915}
2916
2917void helper_write_crN(int reg, target_ulong t0)
2918{
2919}
2920
2921void helper_movl_drN_T0(int reg, target_ulong t0)
2922{
2923}
2924#else
2925target_ulong helper_read_crN(int reg)
2926{
2927 target_ulong val;
2928
2929 helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2930 switch(reg) {
2931 default:
2932 val = env->cr[reg];
2933 break;
2934 case 8:
2935        if (!(env->hflags2 & HF2_VINTR_MASK)) {
2936            val = cpu_get_apic_tpr(env->apic_state);
2937 } else {
2938 val = env->v_tpr;
2939 }
2940 break;
2941 }
2942 return val;
2943}
2944
2945void helper_write_crN(int reg, target_ulong t0)
2946{
2947 helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
2948 switch(reg) {
2949 case 0:
2950 cpu_x86_update_cr0(env, t0);
2951 break;
2952 case 3:
2953 cpu_x86_update_cr3(env, t0);
2954 break;
2955 case 4:
2956 cpu_x86_update_cr4(env, t0);
2957 break;
2958 case 8:
2959        if (!(env->hflags2 & HF2_VINTR_MASK)) {
2960            cpu_set_apic_tpr(env->apic_state, t0);
2961 }
2962 env->v_tpr = t0 & 0x0f;
2963 break;
2964 default:
2965 env->cr[reg] = t0;
2966 break;
2967 }
2968}
2969
2970void helper_movl_drN_T0(int reg, target_ulong t0)
2971{
2972 int i;
2973
2974 if (reg < 4) {
2975 hw_breakpoint_remove(env, reg);
2976 env->dr[reg] = t0;
2977 hw_breakpoint_insert(env, reg);
2978 } else if (reg == 7) {
2979 for (i = 0; i < 4; i++)
2980 hw_breakpoint_remove(env, i);
2981 env->dr[7] = t0;
2982 for (i = 0; i < 4; i++)
2983 hw_breakpoint_insert(env, i);
2984 } else
2985 env->dr[reg] = t0;
2986}
2987#endif
2988
2989void helper_lmsw(target_ulong t0)
2990{
2991 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2992 if already set to one. */
2993 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2994    helper_write_crN(0, t0);
2995}
2996
2997void helper_clts(void)
2998{
2999 env->cr[0] &= ~CR0_TS_MASK;
3000 env->hflags &= ~HF_TS_MASK;
3001}
3002
3003void helper_invlpg(target_ulong addr)
3004{
3005    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
3006    tlb_flush_page(env, addr);
3007}
3008
3009void helper_rdtsc(void)
3010{
3011 uint64_t val;
3012
3013 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3014 raise_exception(EXCP0D_GPF);
3015 }
3016 helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3017
3018    val = cpu_get_tsc(env) + env->tsc_offset;
3019 EAX = (uint32_t)(val);
3020 EDX = (uint32_t)(val >> 32);
3021}
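/* Illustrative: a TSC value of 0x1122334455667788 is returned as
   EDX = 0x11223344, EAX = 0x55667788. The CR4.TSD check above makes
   RDTSC a privileged instruction for CPL > 0 once the OS sets that
   bit. */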
3022
3023void helper_rdtscp(void)
3024{
3025 helper_rdtsc();
3026 ECX = (uint32_t)(env->tsc_aux);
3027}
3028
3029void helper_rdpmc(void)
3030{
3031 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3032 raise_exception(EXCP0D_GPF);
3033 }
3034 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3035
3036 /* currently unimplemented */
3037 raise_exception_err(EXCP06_ILLOP, 0);
3038}
3039
3040#if defined(CONFIG_USER_ONLY)
3041void helper_wrmsr(void)
3042{
3043}
3044
3045void helper_rdmsr(void)
3046{
3047}
3048#else
3049void helper_wrmsr(void)
3050{
3051 uint64_t val;
3052
872929aa
FB
3053 helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3054
3055 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3056
3057 switch((uint32_t)ECX) {
3058 case MSR_IA32_SYSENTER_CS:
3059 env->sysenter_cs = val & 0xffff;
3060 break;
3061 case MSR_IA32_SYSENTER_ESP:
3062 env->sysenter_esp = val;
3063 break;
3064 case MSR_IA32_SYSENTER_EIP:
3065 env->sysenter_eip = val;
3066 break;
3067 case MSR_IA32_APICBASE:
3068        cpu_set_apic_base(env->apic_state, val);
3069 break;
3070 case MSR_EFER:
3071 {
3072 uint64_t update_mask;
3073 update_mask = 0;
3074 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3075 update_mask |= MSR_EFER_SCE;
3076 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3077 update_mask |= MSR_EFER_LME;
3078 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3079 update_mask |= MSR_EFER_FFXSR;
3080 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3081 update_mask |= MSR_EFER_NXE;
3082 if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3083 update_mask |= MSR_EFER_SVME;
3084 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3085 update_mask |= MSR_EFER_FFXSR;
3086 cpu_load_efer(env, (env->efer & ~update_mask) |
3087 (val & update_mask));
3088 }
3089 break;
3090 case MSR_STAR:
3091 env->star = val;
3092 break;
3093 case MSR_PAT:
3094 env->pat = val;
3095 break;
3096 case MSR_VM_HSAVE_PA:
3097 env->vm_hsave = val;
3098 break;
3099#ifdef TARGET_X86_64
3100 case MSR_LSTAR:
3101 env->lstar = val;
3102 break;
3103 case MSR_CSTAR:
3104 env->cstar = val;
3105 break;
3106 case MSR_FMASK:
3107 env->fmask = val;
3108 break;
3109 case MSR_FSBASE:
3110 env->segs[R_FS].base = val;
3111 break;
3112 case MSR_GSBASE:
3113 env->segs[R_GS].base = val;
3114 break;
3115 case MSR_KERNELGSBASE:
3116 env->kernelgsbase = val;
3117 break;
3118#endif
3119 case MSR_MTRRphysBase(0):
3120 case MSR_MTRRphysBase(1):
3121 case MSR_MTRRphysBase(2):
3122 case MSR_MTRRphysBase(3):
3123 case MSR_MTRRphysBase(4):
3124 case MSR_MTRRphysBase(5):
3125 case MSR_MTRRphysBase(6):
3126 case MSR_MTRRphysBase(7):
3127 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3128 break;
3129 case MSR_MTRRphysMask(0):
3130 case MSR_MTRRphysMask(1):
3131 case MSR_MTRRphysMask(2):
3132 case MSR_MTRRphysMask(3):
3133 case MSR_MTRRphysMask(4):
3134 case MSR_MTRRphysMask(5):
3135 case MSR_MTRRphysMask(6):
3136 case MSR_MTRRphysMask(7):
3137 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3138 break;
3139 case MSR_MTRRfix64K_00000:
3140 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3141 break;
3142 case MSR_MTRRfix16K_80000:
3143 case MSR_MTRRfix16K_A0000:
3144 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3145 break;
3146 case MSR_MTRRfix4K_C0000:
3147 case MSR_MTRRfix4K_C8000:
3148 case MSR_MTRRfix4K_D0000:
3149 case MSR_MTRRfix4K_D8000:
3150 case MSR_MTRRfix4K_E0000:
3151 case MSR_MTRRfix4K_E8000:
3152 case MSR_MTRRfix4K_F0000:
3153 case MSR_MTRRfix4K_F8000:
3154 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3155 break;
3156 case MSR_MTRRdefType:
3157 env->mtrr_deftype = val;
3158 break;
3159 case MSR_MCG_STATUS:
3160 env->mcg_status = val;
3161 break;
3162 case MSR_MCG_CTL:
3163 if ((env->mcg_cap & MCG_CTL_P)
3164 && (val == 0 || val == ~(uint64_t)0))
3165 env->mcg_ctl = val;
3166 break;
3167 case MSR_TSC_AUX:
3168 env->tsc_aux = val;
3169 break;
3170    default:
3171 if ((uint32_t)ECX >= MSR_MC0_CTL
3172 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3173 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3174 if ((offset & 0x3) != 0
3175 || (val == 0 || val == ~(uint64_t)0))
3176 env->mce_banks[offset] = val;
3177 break;
3178 }
3179 /* XXX: exception ? */
3180 break;
3181 }
3182}
3183
3184void helper_rdmsr(void)
3185{
3186 uint64_t val;
3187
3188 helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3189
3190 switch((uint32_t)ECX) {
3191 case MSR_IA32_SYSENTER_CS:
3192 val = env->sysenter_cs;
3193 break;
3194 case MSR_IA32_SYSENTER_ESP:
3195 val = env->sysenter_esp;
3196 break;
3197 case MSR_IA32_SYSENTER_EIP:
3198 val = env->sysenter_eip;
3199 break;
3200 case MSR_IA32_APICBASE:
3201        val = cpu_get_apic_base(env->apic_state);
3202 break;
3203 case MSR_EFER:
3204 val = env->efer;
3205 break;
3206 case MSR_STAR:
3207 val = env->star;
3208 break;
3209 case MSR_PAT:
3210 val = env->pat;
3211 break;
3212 case MSR_VM_HSAVE_PA:
3213 val = env->vm_hsave;
3214 break;
3215 case MSR_IA32_PERF_STATUS:
3216 /* tsc_increment_by_tick */
3217 val = 1000ULL;
3218 /* CPU multiplier */
3219 val |= (((uint64_t)4ULL) << 40);
3220 break;
3221#ifdef TARGET_X86_64
3222 case MSR_LSTAR:
3223 val = env->lstar;
3224 break;
3225 case MSR_CSTAR:
3226 val = env->cstar;
3227 break;
3228 case MSR_FMASK:
3229 val = env->fmask;
3230 break;
3231 case MSR_FSBASE:
3232 val = env->segs[R_FS].base;
3233 break;
3234 case MSR_GSBASE:
3235 val = env->segs[R_GS].base;
3236 break;
3237 case MSR_KERNELGSBASE:
3238 val = env->kernelgsbase;
3239 break;
3240 case MSR_TSC_AUX:
3241 val = env->tsc_aux;
3242 break;
3243#endif
3244 case MSR_MTRRphysBase(0):
3245 case MSR_MTRRphysBase(1):
3246 case MSR_MTRRphysBase(2):
3247 case MSR_MTRRphysBase(3):
3248 case MSR_MTRRphysBase(4):
3249 case MSR_MTRRphysBase(5):
3250 case MSR_MTRRphysBase(6):
3251 case MSR_MTRRphysBase(7):
3252 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3253 break;
3254 case MSR_MTRRphysMask(0):
3255 case MSR_MTRRphysMask(1):
3256 case MSR_MTRRphysMask(2):
3257 case MSR_MTRRphysMask(3):
3258 case MSR_MTRRphysMask(4):
3259 case MSR_MTRRphysMask(5):
3260 case MSR_MTRRphysMask(6):
3261 case MSR_MTRRphysMask(7):
3262 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3263 break;
3264 case MSR_MTRRfix64K_00000:
3265 val = env->mtrr_fixed[0];
3266 break;
3267 case MSR_MTRRfix16K_80000:
3268 case MSR_MTRRfix16K_A0000:
3269 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3270 break;
3271 case MSR_MTRRfix4K_C0000:
3272 case MSR_MTRRfix4K_C8000:
3273 case MSR_MTRRfix4K_D0000:
3274 case MSR_MTRRfix4K_D8000:
3275 case MSR_MTRRfix4K_E0000:
3276 case MSR_MTRRfix4K_E8000:
3277 case MSR_MTRRfix4K_F0000:
3278 case MSR_MTRRfix4K_F8000:
3279 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3280 break;
3281 case MSR_MTRRdefType:
3282 val = env->mtrr_deftype;
3283 break;
3284 case MSR_MTRRcap:
3285 if (env->cpuid_features & CPUID_MTRR)
3286 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3287 else
3288 /* XXX: exception ? */
3289 val = 0;
3290 break;
3291 case MSR_MCG_CAP:
3292 val = env->mcg_cap;
3293 break;
3294 case MSR_MCG_CTL:
3295 if (env->mcg_cap & MCG_CTL_P)
3296 val = env->mcg_ctl;
3297 else
3298 val = 0;
3299 break;
3300 case MSR_MCG_STATUS:
3301 val = env->mcg_status;
3302 break;
3303    default:
3304 if ((uint32_t)ECX >= MSR_MC0_CTL
3305 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3306 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3307 val = env->mce_banks[offset];
3308 break;
3309 }
3310 /* XXX: exception ? */
3311 val = 0;
3312 break;
3313 }
3314 EAX = (uint32_t)(val);
3315 EDX = (uint32_t)(val >> 32);
3316}
3317#endif
3318
3319target_ulong helper_lsl(target_ulong selector1)
3320{
3321 unsigned int limit;
3322 uint32_t e1, e2, eflags, selector;
3323 int rpl, dpl, cpl, type;
3324
3325 selector = selector1 & 0xffff;
3326    eflags = helper_cc_compute_all(CC_OP);
3327 if ((selector & 0xfffc) == 0)
3328 goto fail;
3329 if (load_segment(&e1, &e2, selector) != 0)
3330 goto fail;
3331 rpl = selector & 3;
3332 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3333 cpl = env->hflags & HF_CPL_MASK;
3334 if (e2 & DESC_S_MASK) {
3335 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3336 /* conforming */
3337 } else {
3338 if (dpl < cpl || dpl < rpl)
3339 goto fail;
3340 }
3341 } else {
3342 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3343 switch(type) {
3344 case 1:
3345 case 2:
3346 case 3:
3347 case 9:
3348 case 11:
3349 break;
3350 default:
3351 goto fail;
3352 }
3353 if (dpl < cpl || dpl < rpl) {
3354 fail:
3355 CC_SRC = eflags & ~CC_Z;
3356 return 0;
3357 }
3358 }
3359 limit = get_seg_limit(e1, e2);
3360 CC_SRC = eflags | CC_Z;
3361 return limit;
3362}
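/* Usage note: LSL reports success through ZF, not through a fault; on
   any selector, type or privilege failure this helper clears ZF and
   returns 0, and the guest is expected to test ZF before using the
   returned limit. */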
3363
3364target_ulong helper_lar(target_ulong selector1)
3365{
3366 uint32_t e1, e2, eflags, selector;
3367 int rpl, dpl, cpl, type;
3368
3369 selector = selector1 & 0xffff;
3370    eflags = helper_cc_compute_all(CC_OP);
3371 if ((selector & 0xfffc) == 0)
3372 goto fail;
3373 if (load_segment(&e1, &e2, selector) != 0)
3374 goto fail;
3375 rpl = selector & 3;
3376 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3377 cpl = env->hflags & HF_CPL_MASK;
3378 if (e2 & DESC_S_MASK) {
3379 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3380 /* conforming */
3381 } else {
3382 if (dpl < cpl || dpl < rpl)
3383 goto fail;
3384 }
3385 } else {
3386 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3387 switch(type) {
3388 case 1:
3389 case 2:
3390 case 3:
3391 case 4:
3392 case 5:
3393 case 9:
3394 case 11:
3395 case 12:
3396 break;
3397 default:
3398 goto fail;
3399 }
3400 if (dpl < cpl || dpl < rpl) {
3401 fail:
3402 CC_SRC = eflags & ~CC_Z;
3403 return 0;
3404 }
3405 }
3406 CC_SRC = eflags | CC_Z;
3407 return e2 & 0x00f0ff00;
3408}
3409
3410void helper_verr(target_ulong selector1)
3411{
3412 uint32_t e1, e2, eflags, selector;
3413 int rpl, dpl, cpl;
3414
3415 selector = selector1 & 0xffff;
3416    eflags = helper_cc_compute_all(CC_OP);
3417 if ((selector & 0xfffc) == 0)
3418 goto fail;
3419 if (load_segment(&e1, &e2, selector) != 0)
3420 goto fail;
3421 if (!(e2 & DESC_S_MASK))
3422 goto fail;
3423 rpl = selector & 3;
3424 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3425 cpl = env->hflags & HF_CPL_MASK;
3426 if (e2 & DESC_CS_MASK) {
3427 if (!(e2 & DESC_R_MASK))
3428 goto fail;
3429 if (!(e2 & DESC_C_MASK)) {
3430 if (dpl < cpl || dpl < rpl)
3431 goto fail;
3432 }
3433 } else {
3434 if (dpl < cpl || dpl < rpl) {
3435 fail:
3436 CC_SRC = eflags & ~CC_Z;
3437 return;
3438 }
3439 }
3440 CC_SRC = eflags | CC_Z;
3441}
3442
3443void helper_verw(target_ulong selector1)
3444{
3445 uint32_t e1, e2, eflags, selector;
3446 int rpl, dpl, cpl;
3447
3448 selector = selector1 & 0xffff;
3449    eflags = helper_cc_compute_all(CC_OP);
3450 if ((selector & 0xfffc) == 0)
3451 goto fail;
3452 if (load_segment(&e1, &e2, selector) != 0)
3453 goto fail;
3454 if (!(e2 & DESC_S_MASK))
3455 goto fail;
3456 rpl = selector & 3;
3457 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3458 cpl = env->hflags & HF_CPL_MASK;
3459 if (e2 & DESC_CS_MASK) {
3460 goto fail;
3461 } else {
3462 if (dpl < cpl || dpl < rpl)
3463 goto fail;
3464 if (!(e2 & DESC_W_MASK)) {
3465 fail:
3466 CC_SRC = eflags & ~CC_Z;
3467 return;
3468 }
3469 }
3470 CC_SRC = eflags | CC_Z;
3471}
3472
3473/* x87 FPU helpers */
3474
3475static inline double floatx80_to_double(floatx80 a)
3476{
3477 union {
3478 float64 f64;
3479 double d;
3480 } u;
3481
3482    u.f64 = floatx80_to_float64(a, &env->fp_status);
3483 return u.d;
3484}
3485
3486static inline floatx80 double_to_floatx80(double a)
3487{
3488 union {
3489 float64 f64;
3490 double d;
3491 } u;
3492
3493 u.d = a;
3494    return float64_to_floatx80(u.f64, &env->fp_status);
3495}
3496
3497static void fpu_set_exception(int mask)
3498{
3499 env->fpus |= mask;
3500 if (env->fpus & (~env->fpuc & FPUC_EM))
3501 env->fpus |= FPUS_SE | FPUS_B;
3502}
3503
3504static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
3505{
3506    if (floatx80_is_zero(b)) {
3507        fpu_set_exception(FPUS_ZE);
3508    }
3509    return floatx80_div(a, b, &env->fp_status);
3510}
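/* Note: on a zero divisor FPUS_ZE is recorded but the division is
   still handed to softfloat, which returns the IEEE result (an
   infinity for a finite nonzero dividend), so the masked zero-divide
   case behaves as on real hardware. */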
3511
3512static void fpu_raise_exception(void)
3513{
3514 if (env->cr[0] & CR0_NE_MASK) {
3515 raise_exception(EXCP10_COPR);
3516 }
3517#if !defined(CONFIG_USER_ONLY)
3518 else {
3519 cpu_set_ferr(env);
3520 }
3521#endif
3522}
3523
3524void helper_flds_FT0(uint32_t val)
3525{
3526 union {
3527 float32 f;
3528 uint32_t i;
3529 } u;
3530 u.i = val;
3531    FT0 = float32_to_floatx80(u.f, &env->fp_status);
3532}
3533
3534void helper_fldl_FT0(uint64_t val)
3535{
3536 union {
3537 float64 f;
3538 uint64_t i;
3539 } u;
3540 u.i = val;
3541    FT0 = float64_to_floatx80(u.f, &env->fp_status);
3542}
3543
3544void helper_fildl_FT0(int32_t val)
3545{
3546    FT0 = int32_to_floatx80(val, &env->fp_status);
3547}
3548
3549void helper_flds_ST0(uint32_t val)
3550{
3551 int new_fpstt;
3552 union {
3553 float32 f;
3554 uint32_t i;
3555 } u;
3556 new_fpstt = (env->fpstt - 1) & 7;
3557 u.i = val;
3558    env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
3559 env->fpstt = new_fpstt;
3560 env->fptags[new_fpstt] = 0; /* validate stack entry */
3561}
3562
3563void helper_fldl_ST0(uint64_t val)
3564{
3565 int new_fpstt;
3566 union {
3567 float64 f;
3568 uint64_t i;
3569 } u;
3570 new_fpstt = (env->fpstt - 1) & 7;
3571 u.i = val;
3572    env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
3573 env->fpstt = new_fpstt;
3574 env->fptags[new_fpstt] = 0; /* validate stack entry */
3575}
3576
3577void helper_fildl_ST0(int32_t val)
3578{
3579 int new_fpstt;
3580 new_fpstt = (env->fpstt - 1) & 7;
3581    env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
3582 env->fpstt = new_fpstt;
3583 env->fptags[new_fpstt] = 0; /* validate stack entry */
3584}
3585
3586void helper_fildll_ST0(int64_t val)
3587{
3588 int new_fpstt;
3589 new_fpstt = (env->fpstt - 1) & 7;
3590    env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
3591 env->fpstt = new_fpstt;
3592 env->fptags[new_fpstt] = 0; /* validate stack entry */
3593}
3594
3595uint32_t helper_fsts_ST0(void)
3596{
3597 union {
3598 float32 f;
3599 uint32_t i;
3600 } u;
3601    u.f = floatx80_to_float32(ST0, &env->fp_status);
3602 return u.i;
3603}
3604
3605uint64_t helper_fstl_ST0(void)
3606{
3607 union {
3608 float64 f;
3609 uint64_t i;
3610 } u;
3611    u.f = floatx80_to_float64(ST0, &env->fp_status);
3612 return u.i;
3613}
3614
3615int32_t helper_fist_ST0(void)
3616{
3617 int32_t val;
3618    val = floatx80_to_int32(ST0, &env->fp_status);
3619 if (val != (int16_t)val)
3620 val = -32768;
3621 return val;
3622}
3623
3624int32_t helper_fistl_ST0(void)
3625{
3626 int32_t val;
3627    val = floatx80_to_int32(ST0, &env->fp_status);
3628 return val;
3629}
3630
3631int64_t helper_fistll_ST0(void)
3632{
3633 int64_t val;
3634    val = floatx80_to_int64(ST0, &env->fp_status);
3635 return val;
3636}
3637
3638int32_t helper_fistt_ST0(void)
3639{
3640 int32_t val;
3641    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3642 if (val != (int16_t)val)
3643 val = -32768;
3644 return val;
3645}
3646
3647int32_t helper_fisttl_ST0(void)
3648{
3649 int32_t val;
3650    val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3651 return val;
3652}
3653
3654int64_t helper_fisttll_ST0(void)
3655{
3656 int64_t val;
3657    val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
3658 return val;
3659}
3660
3661void helper_fldt_ST0(target_ulong ptr)
3662{
3663 int new_fpstt;
3664 new_fpstt = (env->fpstt - 1) & 7;
3665 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3666 env->fpstt = new_fpstt;
3667 env->fptags[new_fpstt] = 0; /* validate stack entry */
3668}
3669
3670void helper_fstt_ST0(target_ulong ptr)
3671{
3672 helper_fstt(ST0, ptr);
3673}
3674
3675void helper_fpush(void)
3676{
3677 fpush();
3678}
3679
3680void helper_fpop(void)
3681{
3682 fpop();
3683}
3684
3685void helper_fdecstp(void)
3686{
3687 env->fpstt = (env->fpstt - 1) & 7;
3688 env->fpus &= (~0x4700);
3689}
3690
3691void helper_fincstp(void)
3692{
3693 env->fpstt = (env->fpstt + 1) & 7;
3694 env->fpus &= (~0x4700);
3695}
3696
3697/* FPU move */
3698
3699void helper_ffree_STN(int st_index)
3700{
3701 env->fptags[(env->fpstt + st_index) & 7] = 1;
3702}
3703
3704void helper_fmov_ST0_FT0(void)
3705{
3706 ST0 = FT0;
3707}
3708
3709void helper_fmov_FT0_STN(int st_index)
3710{
3711 FT0 = ST(st_index);
3712}
3713
3714void helper_fmov_ST0_STN(int st_index)
3715{
3716 ST0 = ST(st_index);
3717}
3718
3719void helper_fmov_STN_ST0(int st_index)
3720{
3721 ST(st_index) = ST0;
3722}
3723
3724void helper_fxchg_ST0_STN(int st_index)
3725{
3726    floatx80 tmp;
3727 tmp = ST(st_index);
3728 ST(st_index) = ST0;
3729 ST0 = tmp;
3730}
3731
3732/* FPU operations */
3733
3734static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
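/* Mapping: floatx80_compare returns -1/0/1/2 for less/equal/greater/
   unordered, so indexing with ret + 1 sets C0 (0x0100), C3 (0x4000),
   no bits, or C3|C2|C0 (0x4500) in FPUS, the FCOM condition-code
   encoding. */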
3735
3736void helper_fcom_ST0_FT0(void)
3737{
3738 int ret;
3739
3740    ret = floatx80_compare(ST0, FT0, &env->fp_status);
3741    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3742}
3743
3744void helper_fucom_ST0_FT0(void)
3745{
3746 int ret;
3747
3748    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3749    env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3750}
3751
3752static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3753
3754void helper_fcomi_ST0_FT0(void)
3755{
3756 int eflags;
3757 int ret;
3758
3759    ret = floatx80_compare(ST0, FT0, &env->fp_status);
3760    eflags = helper_cc_compute_all(CC_OP);
3761 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3762 CC_SRC = eflags;
eaa728ee
FB
3763}
3764
3765void helper_fucomi_ST0_FT0(void)
3766{
3767 int eflags;
3768 int ret;
3769
3770    ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3771    eflags = helper_cc_compute_all(CC_OP);
3772 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3773 CC_SRC = eflags;
eaa728ee
FB
3774}
3775
3776void helper_fadd_ST0_FT0(void)
3777{
3778    ST0 = floatx80_add(ST0, FT0, &env->fp_status);
3779}
3780
3781void helper_fmul_ST0_FT0(void)
3782{
3783    ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
3784}
3785
3786void helper_fsub_ST0_FT0(void)
3787{
3788    ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
3789}
3790
3791void helper_fsubr_ST0_FT0(void)
3792{
3793    ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
3794}
3795
3796void helper_fdiv_ST0_FT0(void)
3797{
3798 ST0 = helper_fdiv(ST0, FT0);
3799}
3800
3801void helper_fdivr_ST0_FT0(void)
3802{
3803 ST0 = helper_fdiv(FT0, ST0);
3804}
3805
3806/* fp operations between STN and ST0 */
3807
3808void helper_fadd_STN_ST0(int st_index)
3809{
3810    ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
3811}
3812
3813void helper_fmul_STN_ST0(int st_index)
3814{
3815    ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
3816}
3817
3818void helper_fsub_STN_ST0(int st_index)
3819{
3820    ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
3821}
3822
3823void helper_fsubr_STN_ST0(int st_index)
3824{
3825    ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
3826}
3827
3828void helper_fdiv_STN_ST0(int st_index)
3829{
3830    floatx80 *p;
3831 p = &ST(st_index);
3832 *p = helper_fdiv(*p, ST0);
3833}
3834
3835void helper_fdivr_STN_ST0(int st_index)
3836{
3837    floatx80 *p;
3838 p = &ST(st_index);
3839 *p = helper_fdiv(ST0, *p);
3840}
3841
3842/* misc FPU operations */
3843void helper_fchs_ST0(void)
3844{
3845    ST0 = floatx80_chs(ST0);
3846}
3847
3848void helper_fabs_ST0(void)
3849{
3850    ST0 = floatx80_abs(ST0);
3851}
3852
3853void helper_fld1_ST0(void)
3854{
3855    ST0 = floatx80_one;
3856}
3857
3858void helper_fldl2t_ST0(void)
3859{
3860    ST0 = floatx80_l2t;
3861}
3862
3863void helper_fldl2e_ST0(void)
3864{
3865    ST0 = floatx80_l2e;
3866}
3867
3868void helper_fldpi_ST0(void)
3869{
3870    ST0 = floatx80_pi;
3871}
3872
3873void helper_fldlg2_ST0(void)
3874{
3875    ST0 = floatx80_lg2;
3876}
3877
3878void helper_fldln2_ST0(void)
3879{
3880    ST0 = floatx80_ln2;
3881}
3882
3883void helper_fldz_ST0(void)
3884{
3885    ST0 = floatx80_zero;
3886}
3887
3888void helper_fldz_FT0(void)
3889{
3890    FT0 = floatx80_zero;
3891}
3892
3893uint32_t helper_fnstsw(void)
3894{
3895 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3896}
3897
3898uint32_t helper_fnstcw(void)
3899{
3900 return env->fpuc;
3901}
3902
3903static void update_fp_status(void)
3904{
3905 int rnd_type;
3906
3907 /* set rounding mode */
3908 switch(env->fpuc & RC_MASK) {
3909 default:
3910 case RC_NEAR:
3911 rnd_type = float_round_nearest_even;
3912 break;
3913 case RC_DOWN:
3914 rnd_type = float_round_down;
3915 break;
3916 case RC_UP:
3917 rnd_type = float_round_up;
3918 break;
3919 case RC_CHOP:
3920 rnd_type = float_round_to_zero;
3921 break;
3922 }
3923 set_float_rounding_mode(rnd_type, &env->fp_status);
3924 switch((env->fpuc >> 8) & 3) {
3925 case 0:
3926 rnd_type = 32;
3927 break;
3928 case 2:
3929 rnd_type = 64;
3930 break;
3931 case 3:
3932 default:
3933 rnd_type = 80;
3934 break;
3935 }
3936 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3937}
3938
3939void helper_fldcw(uint32_t val)
3940{
3941 env->fpuc = val;
3942 update_fp_status();
3943}
3944
3945void helper_fclex(void)
3946{
3947 env->fpus &= 0x7f00;
3948}
3949
3950void helper_fwait(void)
3951{
3952 if (env->fpus & FPUS_SE)
3953 fpu_raise_exception();
3954}
3955
3956void helper_fninit(void)
3957{
3958 env->fpus = 0;
3959 env->fpstt = 0;
3960 env->fpuc = 0x37f;
3961 env->fptags[0] = 1;
3962 env->fptags[1] = 1;
3963 env->fptags[2] = 1;
3964 env->fptags[3] = 1;
3965 env->fptags[4] = 1;
3966 env->fptags[5] = 1;
3967 env->fptags[6] = 1;
3968 env->fptags[7] = 1;
3969}
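/* FNINIT defaults: control word 0x037f means all exceptions masked,
   64-bit precision, round to nearest; a tag value of 1 marks the
   register as empty in QEMU's simplified per-register tag array. */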
3970
3971/* BCD ops */
3972
3973void helper_fbld_ST0(target_ulong ptr)
3974{
c31da136 3975 floatx80 tmp;
3976 uint64_t val;
3977 unsigned int v;
3978 int i;
3979
3980 val = 0;
3981 for(i = 8; i >= 0; i--) {
3982 v = ldub(ptr + i);
3983 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3984 }
c31da136 3985 tmp = int64_to_floatx80(val, &env->fp_status);
788e7336 3986 if (ldub(ptr + 9) & 0x80) {
c31da136 3987 tmp = floatx80_chs(tmp);
788e7336 3988 }
3989 fpush();
3990 ST0 = tmp;
3991}
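/* FBLD/FBST use the 80-bit packed BCD format: bytes 0-8 hold 18
   decimal digits, two per byte with the high nibble more significant,
   and bit 7 of byte 9 carries the sign. */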
3992
3993void helper_fbst_ST0(target_ulong ptr)
3994{
3995 int v;
3996 target_ulong mem_ref, mem_end;
3997 int64_t val;
3998
c31da136 3999 val = floatx80_to_int64(ST0, &env->fp_status);
4000 mem_ref = ptr;
4001 mem_end = mem_ref + 9;
4002 if (val < 0) {
4003 stb(mem_end, 0x80);
4004 val = -val;
4005 } else {
4006 stb(mem_end, 0x00);
4007 }
4008 while (mem_ref < mem_end) {
4009 if (val == 0)
4010 break;
4011 v = val % 100;
4012 val = val / 100;
4013 v = ((v / 10) << 4) | (v % 10);
4014 stb(mem_ref++, v);
4015 }
4016 while (mem_ref < mem_end) {
4017 stb(mem_ref++, 0);
4018 }
4019}
4020
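/* The transcendental helpers below go through the host 'double' type
   and libm, so results only carry about 53 bits of precision instead
   of the 64 significand bits of a real 80387; see floatx80_to_double()
   and double_to_floatx80(). */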
4021void helper_f2xm1(void)
4022{
c31da136 4023 double val = floatx80_to_double(ST0);
a2c9ed3c 4024 val = pow(2.0, val) - 1.0;
c31da136 4025 ST0 = double_to_floatx80(val);
4026}
4027
4028void helper_fyl2x(void)
4029{
c31da136 4030 double fptemp = floatx80_to_double(ST0);
eaa728ee 4031
eaa728ee 4032 if (fptemp>0.0){
a2c9ed3c 4033 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4034 fptemp *= floatx80_to_double(ST1);
4035 ST1 = double_to_floatx80(fptemp);
4036 fpop();
4037 } else {
4038 env->fpus &= (~0x4700);
4039 env->fpus |= 0x400;
4040 }
4041}
4042
4043void helper_fptan(void)
4044{
c31da136 4045 double fptemp = floatx80_to_double(ST0);
eaa728ee 4046
4047 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4048 env->fpus |= 0x400;
4049 } else {
a2c9ed3c 4050 fptemp = tan(fptemp);
c31da136 4051 ST0 = double_to_floatx80(fptemp);
eaa728ee 4052 fpush();
c31da136 4053 ST0 = floatx80_one;
4054 env->fpus &= (~0x400); /* C2 <-- 0 */
4055 /* the above code is for |arg| < 2**52 only */
4056 }
4057}
4058
4059void helper_fpatan(void)
4060{
a2c9ed3c 4061 double fptemp, fpsrcop;
eaa728ee 4062
4063 fpsrcop = floatx80_to_double(ST1);
4064 fptemp = floatx80_to_double(ST0);
4065 ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
4066 fpop();
4067}
4068
4069void helper_fxtract(void)
4070{
c31da136 4071 CPU_LDoubleU temp;
4072
4073 temp.d = ST0;
c9ad19c5 4074
c31da136 4075 if (floatx80_is_zero(ST0)) {
c9ad19c5 4076 /* Easy way to generate -inf and raise the divide-by-zero exception */
c31da136 4077 ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
4078 fpush();
4079 ST0 = temp.d;
4080 } else {
4081 int expdif;
4082
4083 expdif = EXPD(temp) - EXPBIAS;
4084 /*DP exponent bias*/
c31da136 4085 ST0 = int32_to_floatx80(expdif, &env->fp_status);
4086 fpush();
4087 BIASEXPONENT(temp);
4088 ST0 = temp.d;
4089 }
4090}
4091
4092void helper_fprem1(void)
4093{
bcb5fec5 4094 double st0, st1, dblq, fpsrcop, fptemp;
c31da136 4095 CPU_LDoubleU fpsrcop1, fptemp1;
4096 int expdif;
4097 signed long long int q;
4098
4099 st0 = floatx80_to_double(ST0);
4100 st1 = floatx80_to_double(ST1);
4101
4102 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
c31da136 4103 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4104 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4105 return;
4106 }
4107
4108 fpsrcop = st0;
4109 fptemp = st1;
4110 fpsrcop1.d = ST0;
4111 fptemp1.d = ST1;
4112 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4113
4114 if (expdif < 0) {
4115 /* optimisation? taken from the AMD docs */
4116 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4117 /* ST0 is unchanged */
4118 return;
4119 }
4120
4121 if (expdif < 53) {
4122 dblq = fpsrcop / fptemp;
4123 /* round dblq towards nearest integer */
4124 dblq = rint(dblq);
bcb5fec5 4125 st0 = fpsrcop - fptemp * dblq;
4126
4127 /* convert dblq to q by truncating towards zero */
4128 if (dblq < 0.0)
4129 q = (signed long long int)(-dblq);
4130 else
4131 q = (signed long long int)dblq;
4132
4133 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4134 /* (C0,C3,C1) <-- (q2,q1,q0) */
4135 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4136 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4137 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4138 } else {
4139 env->fpus |= 0x400; /* C2 <-- 1 */
4140 fptemp = pow(2.0, expdif - 50);
bcb5fec5 4141 fpsrcop = (st0 / st1) / fptemp;
4142 /* fpsrcop = integer obtained by chopping */
4143 fpsrcop = (fpsrcop < 0.0) ?
4144 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
bcb5fec5 4145 st0 -= (st1 * fpsrcop * fptemp);
eaa728ee 4146 }
c31da136 4147 ST0 = double_to_floatx80(st0);
4148}
4149
4150void helper_fprem(void)
4151{
bcb5fec5 4152 double st0, st1, dblq, fpsrcop, fptemp;
c31da136 4153 CPU_LDoubleU fpsrcop1, fptemp1;
4154 int expdif;
4155 signed long long int q;
4156
4157 st0 = floatx80_to_double(ST0);
4158 st1 = floatx80_to_double(ST1);
4159
4160 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
c31da136 4161 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4162 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4163 return;
4164 }
4165
4166 fpsrcop = st0;
4167 fptemp = st1;
4168 fpsrcop1.d = ST0;
4169 fptemp1.d = ST1;
4170 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4171
4172 if (expdif < 0) {
4173 /* optimisation? taken from the AMD docs */
4174 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4175 /* ST0 is unchanged */
4176 return;
4177 }
4178
4179 if ( expdif < 53 ) {
4180 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4181 /* round dblq towards zero */
4182 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
bcb5fec5 4183 st0 = fpsrcop/*ST0*/ - fptemp * dblq;
4184
4185 /* convert dblq to q by truncating towards zero */
4186 if (dblq < 0.0)
4187 q = (signed long long int)(-dblq);
4188 else
4189 q = (signed long long int)dblq;
4190
4191 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4192 /* (C0,C3,C1) <-- (q2,q1,q0) */
4193 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4194 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4195 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4196 } else {
4197 int N = 32 + (expdif % 32); /* as per AMD docs */
4198 env->fpus |= 0x400; /* C2 <-- 1 */
4199 fptemp = pow(2.0, (double)(expdif - N));
bcb5fec5 4200 fpsrcop = (st0 / st1) / fptemp;
4201 /* fpsrcop = integer obtained by chopping */
4202 fpsrcop = (fpsrcop < 0.0) ?
4203 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
bcb5fec5 4204 st0 -= (st1 * fpsrcop * fptemp);
eaa728ee 4205 }
c31da136 4206 ST0 = double_to_floatx80(st0);
4207}
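/* The two helpers above implement FPREM1/FPREM with a partial
   remainder: when the exponents differ by too much for one step (53
   bits here, because the work is done in host doubles), C2 is set and
   the caller is expected to iterate until it clears; on a complete
   reduction C0/C3/C1 receive quotient bits q2/q1/q0. */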
4208
4209void helper_fyl2xp1(void)
4210{
c31da136 4211 double fptemp = floatx80_to_double(ST0);
eaa728ee 4212
4213 if ((fptemp+1.0)>0.0) {
4214 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4215 fptemp *= floatx80_to_double(ST1);
4216 ST1 = double_to_floatx80(fptemp);
4217 fpop();
4218 } else {
4219 env->fpus &= (~0x4700);
4220 env->fpus |= 0x400;
4221 }
4222}
4223
4224void helper_fsqrt(void)
4225{
c31da136 4226 if (floatx80_is_neg(ST0)) {
4227 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4228 env->fpus |= 0x400;
4229 }
c31da136 4230 ST0 = floatx80_sqrt(ST0, &env->fp_status);
4231}
4232
4233void helper_fsincos(void)
4234{
c31da136 4235 double fptemp = floatx80_to_double(ST0);
eaa728ee 4236
4237 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4238 env->fpus |= 0x400;
4239 } else {
c31da136 4240 ST0 = double_to_floatx80(sin(fptemp));
eaa728ee 4241 fpush();
c31da136 4242 ST0 = double_to_floatx80(cos(fptemp));
4243 env->fpus &= (~0x400); /* C2 <-- 0 */
4244 /* the above code is for |arg| < 2**63 only */
4245 }
4246}
4247
4248void helper_frndint(void)
4249{
c31da136 4250 ST0 = floatx80_round_to_int(ST0, &env->fp_status);
4251}
4252
4253void helper_fscale(void)
4254{
c31da136 4255 if (floatx80_is_any_nan(ST1)) {
4256 ST0 = ST1;
4257 } else {
4258 int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
4259 ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
be1c17c7 4260 }
4261}
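/* FSCALE: ST0 is scaled by 2^trunc(ST1); a NaN in ST1 is propagated
   to ST0, and floatx80_scalbn is left to handle overflow and
   underflow of the result. */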
4262
4263void helper_fsin(void)
4264{
c31da136 4265 double fptemp = floatx80_to_double(ST0);
eaa728ee 4266
4267 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4268 env->fpus |= 0x400;
4269 } else {
c31da136 4270 ST0 = double_to_floatx80(sin(fptemp));
4271 env->fpus &= (~0x400); /* C2 <-- 0 */
4272 /* the above code is for |arg| < 2**53 only */
4273 }
4274}
4275
4276void helper_fcos(void)
4277{
c31da136 4278 double fptemp = floatx80_to_double(ST0);
eaa728ee 4279
4280 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4281 env->fpus |= 0x400;
4282 } else {
c31da136 4283 ST0 = double_to_floatx80(cos(fptemp));
4284 env->fpus &= (~0x400); /* C2 <-- 0 */
4285 /* the above code is for |arg| < 2**63 only */
4286 }
4287}
4288
4289void helper_fxam_ST0(void)
4290{
c31da136 4291 CPU_LDoubleU temp;
4292 int expdif;
4293
4294 temp.d = ST0;
4295
4296 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4297 if (SIGND(temp))
4298 env->fpus |= 0x200; /* C1 <-- 1 */
4299
4300 /* XXX: test fptags too */
4301 expdif = EXPD(temp);
4302 if (expdif == MAXEXPD) {
eaa728ee 4303 if (MANTD(temp) == 0x8000000000000000ULL)
4304 env->fpus |= 0x500 /*Infinity*/;
4305 else
4306 env->fpus |= 0x100 /*NaN*/;
4307 } else if (expdif == 0) {
4308 if (MANTD(temp) == 0)
4309 env->fpus |= 0x4000 /*Zero*/;
4310 else
4311 env->fpus |= 0x4400 /*Denormal*/;
4312 } else {
4313 env->fpus |= 0x400;
4314 }
4315}
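/* FXAM condition code patterns set above (C3,C2,C0): 0x0100 NaN,
   0x0400 normal, 0x0500 infinity, 0x4000 zero, 0x4400 denormal;
   C1 (0x0200) reports the sign. */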
4316
4317void helper_fstenv(target_ulong ptr, int data32)
4318{
4319 int fpus, fptag, exp, i;
4320 uint64_t mant;
c31da136 4321 CPU_LDoubleU tmp;
4322
4323 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4324 fptag = 0;
4325 for (i=7; i>=0; i--) {
4326 fptag <<= 2;
4327 if (env->fptags[i]) {
4328 fptag |= 3;
4329 } else {
4330 tmp.d = env->fpregs[i].d;
4331 exp = EXPD(tmp);
4332 mant = MANTD(tmp);
4333 if (exp == 0 && mant == 0) {
4334 /* zero */
4335 fptag |= 1;
4336 } else if (exp == 0 || exp == MAXEXPD
eaa728ee 4337 || (mant & (1LL << 63)) == 0
4338 ) {
4339 /* NaNs, infinity, denormal */
4340 fptag |= 2;
4341 }
4342 }
4343 }
4344 if (data32) {
4345 /* 32 bit */
4346 stl(ptr, env->fpuc);
4347 stl(ptr + 4, fpus);
4348 stl(ptr + 8, fptag);
4349 stl(ptr + 12, 0); /* fpip */
4350 stl(ptr + 16, 0); /* fpcs */
4351 stl(ptr + 20, 0); /* fpoo */
4352 stl(ptr + 24, 0); /* fpos */
4353 } else {
4354 /* 16 bit */
4355 stw(ptr, env->fpuc);
4356 stw(ptr + 2, fpus);
4357 stw(ptr + 4, fptag);
4358 stw(ptr + 6, 0);
4359 stw(ptr + 8, 0);
4360 stw(ptr + 10, 0);
4361 stw(ptr + 12, 0);
4362 }
4363}
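/* The FSTENV image is 28 bytes in 32-bit mode and 14 bytes in 16-bit
   mode: control, status and tag words followed by the (unimplemented
   here) instruction and operand pointers.  The two-bit tags are
   rebuilt from the register contents: 00 valid, 01 zero, 10 special
   (NaN/infinity/denormal), 11 empty. */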
4364
4365void helper_fldenv(target_ulong ptr, int data32)
4366{
4367 int i, fpus, fptag;
4368
4369 if (data32) {
4370 env->fpuc = lduw(ptr);
4371 fpus = lduw(ptr + 4);
4372 fptag = lduw(ptr + 8);
4373 }
4374 else {
4375 env->fpuc = lduw(ptr);
4376 fpus = lduw(ptr + 2);
4377 fptag = lduw(ptr + 4);
4378 }
4379 env->fpstt = (fpus >> 11) & 7;
4380 env->fpus = fpus & ~0x3800;
4381 for(i = 0;i < 8; i++) {
4382 env->fptags[i] = ((fptag & 3) == 3);
4383 fptag >>= 2;
4384 }
4385}
4386
4387void helper_fsave(target_ulong ptr, int data32)
4388{
c31da136 4389 floatx80 tmp;
4390 int i;
4391
4392 helper_fstenv(ptr, data32);
4393
4394 ptr += (14 << data32);
4395 for(i = 0;i < 8; i++) {
4396 tmp = ST(i);
4397 helper_fstt(tmp, ptr);
4398 ptr += 10;
4399 }
4400
4401 /* fninit */
4402 env->fpus = 0;
4403 env->fpstt = 0;
4404 env->fpuc = 0x37f;
4405 env->fptags[0] = 1;
4406 env->fptags[1] = 1;
4407 env->fptags[2] = 1;
4408 env->fptags[3] = 1;
4409 env->fptags[4] = 1;
4410 env->fptags[5] = 1;
4411 env->fptags[6] = 1;
4412 env->fptags[7] = 1;
4413}
4414
4415void helper_frstor(target_ulong ptr, int data32)
4416{
c31da136 4417 floatx80 tmp;
4418 int i;
4419
4420 helper_fldenv(ptr, data32);
4421 ptr += (14 << data32);
4422
4423 for(i = 0;i < 8; i++) {
4424 tmp = helper_fldt(ptr);
4425 ST(i) = tmp;
4426 ptr += 10;
4427 }
4428}
4429
4430void helper_fxsave(target_ulong ptr, int data64)
4431{
4432 int fpus, fptag, i, nb_xmm_regs;
c31da136 4433 floatx80 tmp;
4434 target_ulong addr;
4435
4436 /* The operand must be 16 byte aligned */
4437 if (ptr & 0xf) {
4438 raise_exception(EXCP0D_GPF);
4439 }
4440
4441 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4442 fptag = 0;
4443 for(i = 0; i < 8; i++) {
4444 fptag |= (env->fptags[i] << i);
4445 }
4446 stw(ptr, env->fpuc);
4447 stw(ptr + 2, fpus);
4448 stw(ptr + 4, fptag ^ 0xff);
4449#ifdef TARGET_X86_64
4450 if (data64) {
4451 stq(ptr + 0x08, 0); /* rip */
4452 stq(ptr + 0x10, 0); /* rdp */
4453 } else
4454#endif
4455 {
4456 stl(ptr + 0x08, 0); /* eip */
4457 stl(ptr + 0x0c, 0); /* sel */
4458 stl(ptr + 0x10, 0); /* dp */
4459 stl(ptr + 0x14, 0); /* sel */
4460 }
4461
4462 addr = ptr + 0x20;
4463 for(i = 0;i < 8; i++) {
4464 tmp = ST(i);
4465 helper_fstt(tmp, addr);
4466 addr += 16;
4467 }
4468
4469 if (env->cr[4] & CR4_OSFXSR_MASK) {
4470 /* XXX: finish it */
4471 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4472 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4473 if (env->hflags & HF_CS64_MASK)
4474 nb_xmm_regs = 16;
4475 else
4476 nb_xmm_regs = 8;
4477 addr = ptr + 0xa0;
4478 /* Fast FXSAVE leaves out the XMM registers */
4479 if (!(env->efer & MSR_EFER_FFXSR)
4480 || (env->hflags & HF_CPL_MASK)
4481 || !(env->hflags & HF_LMA_MASK)) {
4482 for(i = 0; i < nb_xmm_regs; i++) {
4483 stq(addr, env->xmm_regs[i].XMM_Q(0));
4484 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4485 addr += 16;
4486 }
4487 }
4488 }
4489}
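/* FXSAVE writes a 512-byte image: the legacy x87 header, the eight
   FP/MMX registers in 16-byte slots from offset 0x20, and the XMM
   registers from offset 0xa0 - the latter skipped by "fast FXSAVE"
   when FFXSR is enabled and the caller is 64-bit kernel code. */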
4490
4491void helper_fxrstor(target_ulong ptr, int data64)
4492{
4493 int i, fpus, fptag, nb_xmm_regs;
c31da136 4494 floatx80 tmp;
4495 target_ulong addr;
4496
4497 /* The operand must be 16 byte aligned */
4498 if (ptr & 0xf) {
4499 raise_exception(EXCP0D_GPF);
4500 }
4501
4502 env->fpuc = lduw(ptr);
4503 fpus = lduw(ptr + 2);
4504 fptag = lduw(ptr + 4);
4505 env->fpstt = (fpus >> 11) & 7;
4506 env->fpus = fpus & ~0x3800;
4507 fptag ^= 0xff;
4508 for(i = 0;i < 8; i++) {
4509 env->fptags[i] = ((fptag >> i) & 1);
4510 }
4511
4512 addr = ptr + 0x20;
4513 for(i = 0;i < 8; i++) {
4514 tmp = helper_fldt(addr);
4515 ST(i) = tmp;
4516 addr += 16;
4517 }
4518
4519 if (env->cr[4] & CR4_OSFXSR_MASK) {
4520 /* XXX: finish it */
4521 env->mxcsr = ldl(ptr + 0x18);
4522 //ldl(ptr + 0x1c);
4523 if (env->hflags & HF_CS64_MASK)
4524 nb_xmm_regs = 16;
4525 else
4526 nb_xmm_regs = 8;
4527 addr = ptr + 0xa0;
4528 /* Fast FXRSTOR leaves out the XMM registers */
4529 if (!(env->efer & MSR_EFER_FFXSR)
4530 || (env->hflags & HF_CPL_MASK)
4531 || !(env->hflags & HF_LMA_MASK)) {
4532 for(i = 0; i < nb_xmm_regs; i++) {
4533 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4534 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4535 addr += 16;
4536 }
4537 }
4538 }
4539}
4540
c31da136 4541void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
eaa728ee 4542{
c31da136 4543 CPU_LDoubleU temp;
4544
4545 temp.d = f;
4546 *pmant = temp.l.lower;
4547 *pexp = temp.l.upper;
4548}
4549
c31da136 4550floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
eaa728ee 4551{
c31da136 4552 CPU_LDoubleU temp;
4553
4554 temp.l.upper = upper;
4555 temp.l.lower = mant;
4556 return temp.d;
4557}
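/* cpu_get_fp80()/cpu_set_fp80() convert between a floatx80 and its
   raw encoding, a 64-bit significand (with explicit integer bit) plus
   a 16-bit sign/exponent word, so external code such as state
   save/load can marshal the FPU registers. */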
4558
4559#ifdef TARGET_X86_64
4560
4561//#define DEBUG_MULDIV
4562
4563static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4564{
4565 *plow += a;
4566 /* carry test */
4567 if (*plow < a)
4568 (*phigh)++;
4569 *phigh += b;
4570}
4571
4572static void neg128(uint64_t *plow, uint64_t *phigh)
4573{
4574 *plow = ~ *plow;
4575 *phigh = ~ *phigh;
4576 add128(plow, phigh, 1, 0);
4577}
4578
4579/* return TRUE if overflow */
4580static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4581{
4582 uint64_t q, r, a1, a0;
4583 int i, qb, ab;
4584
4585 a0 = *plow;
4586 a1 = *phigh;
4587 if (a1 == 0) {
4588 q = a0 / b;
4589 r = a0 % b;
4590 *plow = q;
4591 *phigh = r;
4592 } else {
4593 if (a1 >= b)
4594 return 1;
4595 /* XXX: use a better algorithm */
4596 for(i = 0; i < 64; i++) {
4597 ab = a1 >> 63;
4598 a1 = (a1 << 1) | (a0 >> 63);
4599 if (ab || a1 >= b) {
4600 a1 -= b;
4601 qb = 1;
4602 } else {
4603 qb = 0;
4604 }
4605 a0 = (a0 << 1) | qb;
4606 }
4607#if defined(DEBUG_MULDIV)
4608 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4609 *phigh, *plow, b, a0, a1);
4610#endif
4611 *plow = a0;
4612 *phigh = a1;
4613 }
4614 return 0;
4615}
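/* div64: 128/64 -> 64 bit restoring division.  A zero high word is
   handled with native 64-bit operations; otherwise 64 shift-subtract
   steps accumulate the quotient in *plow and leave the remainder in
   *phigh.  A high word >= divisor means the quotient cannot fit in
   64 bits, which the callers report as a division error. */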
4616
4617/* return TRUE if overflow */
4618static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4619{
4620 int sa, sb;
4621 sa = ((int64_t)*phigh < 0);
4622 if (sa)
4623 neg128(plow, phigh);
4624 sb = (b < 0);
4625 if (sb)
4626 b = -b;
4627 if (div64(plow, phigh, b) != 0)
4628 return 1;
4629 if (sa ^ sb) {
4630 if (*plow > (1ULL << 63))
4631 return 1;
4632 *plow = - *plow;
4633 } else {
4634 if (*plow >= (1ULL << 63))
4635 return 1;
4636 }
4637 if (sa)
4638 *phigh = - *phigh;
4639 return 0;
4640}
4641
4642void helper_mulq_EAX_T0(target_ulong t0)
4643{
4644 uint64_t r0, r1;
4645
4646 mulu64(&r0, &r1, EAX, t0);
4647 EAX = r0;
4648 EDX = r1;
4649 CC_DST = r0;
4650 CC_SRC = r1;
4651}
4652
4653void helper_imulq_EAX_T0(target_ulong t0)
4654{
4655 uint64_t r0, r1;
4656
4657 muls64(&r0, &r1, EAX, t0);
4658 EAX = r0;
4659 EDX = r1;
4660 CC_DST = r0;
4661 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4662}
4663
4664target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4665{
4666 uint64_t r0, r1;
4667
4668 muls64(&r0, &r1, t0, t1);
4669 CC_DST = r0;
4670 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4671 return r0;
4672}
4673
4674void helper_divq_EAX(target_ulong t0)
4675{
4676 uint64_t r0, r1;
4677 if (t0 == 0) {
4678 raise_exception(EXCP00_DIVZ);
4679 }
4680 r0 = EAX;
4681 r1 = EDX;
4682 if (div64(&r0, &r1, t0))
4683 raise_exception(EXCP00_DIVZ);
4684 EAX = r0;
4685 EDX = r1;
4686}
4687
4688void helper_idivq_EAX(target_ulong t0)
4689{
4690 uint64_t r0, r1;
4691 if (t0 == 0) {
4692 raise_exception(EXCP00_DIVZ);
4693 }
4694 r0 = EAX;
4695 r1 = EDX;
4696 if (idiv64(&r0, &r1, t0))
4697 raise_exception(EXCP00_DIVZ);
4698 EAX = r0;
4699 EDX = r1;
4700}
4701#endif
4702
94451178 4703static void do_hlt(void)
4704{
4705 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
ce5232c5 4706 env->halted = 1;
eaa728ee 4707 env->exception_index = EXCP_HLT;
1162c041 4708 cpu_loop_exit(env);
4709}
4710
4711void helper_hlt(int next_eip_addend)
4712{
4713 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4714 EIP += next_eip_addend;
4715
4716 do_hlt();
4717}
4718
4719void helper_monitor(target_ulong ptr)
4720{
4721 if ((uint32_t)ECX != 0)
4722 raise_exception(EXCP0D_GPF);
4723 /* XXX: store address ? */
872929aa 4724 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4725}
4726
94451178 4727void helper_mwait(int next_eip_addend)
4728{
4729 if ((uint32_t)ECX != 0)
4730 raise_exception(EXCP0D_GPF);
872929aa 4731 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4732 EIP += next_eip_addend;
4733
4734 /* XXX: not complete but not completely erroneous */
4735 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4736 /* more than one CPU: do not sleep because another CPU may
4737 wake this one */
4738 } else {
94451178 4739 do_hlt();
4740 }
4741}
4742
4743void helper_debug(void)
4744{
4745 env->exception_index = EXCP_DEBUG;
1162c041 4746 cpu_loop_exit(env);
4747}
4748
4749void helper_reset_rf(void)
4750{
4751 env->eflags &= ~RF_MASK;
4752}
4753
4754void helper_raise_interrupt(int intno, int next_eip_addend)
4755{
4756 raise_interrupt(intno, 1, 0, next_eip_addend);
4757}
4758
4759void helper_raise_exception(int exception_index)
4760{
4761 raise_exception(exception_index);
4762}
4763
4764void helper_cli(void)
4765{
4766 env->eflags &= ~IF_MASK;
4767}
4768
4769void helper_sti(void)
4770{
4771 env->eflags |= IF_MASK;
4772}
4773
4774#if 0
4775/* vm86plus instructions */
4776void helper_cli_vm(void)
4777{
4778 env->eflags &= ~VIF_MASK;
4779}
4780
4781void helper_sti_vm(void)
4782{
4783 env->eflags |= VIF_MASK;
4784 if (env->eflags & VIP_MASK) {
4785 raise_exception(EXCP0D_GPF);
4786 }
4787}
4788#endif
4789
4790void helper_set_inhibit_irq(void)
4791{
4792 env->hflags |= HF_INHIBIT_IRQ_MASK;
4793}
4794
4795void helper_reset_inhibit_irq(void)
4796{
4797 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4798}
4799
4800void helper_boundw(target_ulong a0, int v)
4801{
4802 int low, high;
4803 low = ldsw(a0);
4804 high = ldsw(a0 + 2);
4805 v = (int16_t)v;
4806 if (v < low || v > high) {
4807 raise_exception(EXCP05_BOUND);
4808 }
4809}
4810
4811void helper_boundl(target_ulong a0, int v)
4812{
4813 int low, high;
4814 low = ldl(a0);
4815 high = ldl(a0 + 4);
4816 if (v < low || v > high) {
4817 raise_exception(EXCP05_BOUND);
4818 }
4819}
4820
4821#if !defined(CONFIG_USER_ONLY)
4822
4823#define MMUSUFFIX _mmu
4824
4825#define SHIFT 0
4826#include "softmmu_template.h"
4827
4828#define SHIFT 1
4829#include "softmmu_template.h"
4830
4831#define SHIFT 2
4832#include "softmmu_template.h"
4833
4834#define SHIFT 3
4835#include "softmmu_template.h"
4836
4837#endif
4838
d9957a8b 4839#if !defined(CONFIG_USER_ONLY)
4840/* try to fill the TLB and return an exception if error. If retaddr is
4841 NULL, it means that the function was called in C code (i.e. not
4842 from generated code or from helper.c) */
4843/* XXX: fix it to restore all registers */
4844void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4845{
4846 TranslationBlock *tb;
4847 int ret;
4848 unsigned long pc;
4849 CPUX86State *saved_env;
4850
4851 /* XXX: hack to restore env in all cases, even if not called from
4852 generated code */
4853 saved_env = env;
4854 env = cpu_single_env;
4855
4856 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4857 if (ret) {
4858 if (retaddr) {
4859 /* now we have a real cpu fault */
4860 pc = (unsigned long)retaddr;
4861 tb = tb_find_pc(pc);
4862 if (tb) {
4863 /* the PC is inside the translated code. It means that we have
4864 a virtual CPU fault */
618ba8e6 4865 cpu_restore_state(tb, env, pc);
4866 }
4867 }
872929aa 4868 raise_exception_err(env->exception_index, env->error_code);
4869 }
4870 env = saved_env;
4871}
d9957a8b 4872#endif
4873
4874/* Secure Virtual Machine helpers */
4875
4876#if defined(CONFIG_USER_ONLY)
4877
db620f46 4878void helper_vmrun(int aflag, int next_eip_addend)
4879{
4880}
4881void helper_vmmcall(void)
4882{
4883}
914178d3 4884void helper_vmload(int aflag)
4885{
4886}
914178d3 4887void helper_vmsave(int aflag)
4888{
4889}
4890void helper_stgi(void)
4891{
4892}
4893void helper_clgi(void)
4894{
4895}
4896void helper_skinit(void)
4897{
4898}
914178d3 4899void helper_invlpga(int aflag)
4900{
4901}
4902void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4903{
4904}
4905void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4906{
4907}
4908
4909void svm_check_intercept(CPUState *env1, uint32_t type)
4910{
4911}
4912
4913void helper_svm_check_io(uint32_t port, uint32_t param,
4914 uint32_t next_eip_addend)
4915{
4916}
4917#else
4918
c227f099 4919static inline void svm_save_seg(target_phys_addr_t addr,
872929aa 4920 const SegmentCache *sc)
eaa728ee 4921{
4922 stw_phys(addr + offsetof(struct vmcb_seg, selector),
4923 sc->selector);
4924 stq_phys(addr + offsetof(struct vmcb_seg, base),
4925 sc->base);
4926 stl_phys(addr + offsetof(struct vmcb_seg, limit),
4927 sc->limit);
4928 stw_phys(addr + offsetof(struct vmcb_seg, attrib),
e72210e1 4929 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
4930}
4931
c227f099 4932static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
4933{
4934 unsigned int flags;
4935
4936 sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
4937 sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
4938 sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
4939 flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
4940 sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
4941}
4942
c227f099 4943static inline void svm_load_seg_cache(target_phys_addr_t addr,
872929aa 4944 CPUState *env, int seg_reg)
eaa728ee 4945{
4946 SegmentCache sc1, *sc = &sc1;
4947 svm_load_seg(addr, sc);
4948 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
4949 sc->base, sc->limit, sc->flags);
4950}
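/* The VMCB 'attrib' word uses SVM's packed format: the access-rights
   byte (type/S/DPL/P) in bits 0-7 and the AVL/L/DB/G flags in bits
   8-11; the shifted halves in svm_save_seg()/svm_load_seg() squeeze
   out the limit bits that sit between them in the CPU's cached
   segment flags. */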
4951
db620f46 4952void helper_vmrun(int aflag, int next_eip_addend)
4953{
4954 target_ulong addr;
4955 uint32_t event_inj;
4956 uint32_t int_ctl;
4957
4958 helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
4959
4960 if (aflag == 2)
4961 addr = EAX;
4962 else
4963 addr = (uint32_t)EAX;
4964
93fcfe39 4965 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
4966
4967 env->vm_vmcb = addr;
4968
4969 /* save the current CPU state in the hsave page */
4970 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4971 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4972
4973 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4974 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4975
4976 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4977 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4978 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4979 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4980 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4981 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4982
4983 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4984 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4985
4986 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
4987 &env->segs[R_ES]);
4988 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
4989 &env->segs[R_CS]);
4990 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
4991 &env->segs[R_SS]);
4992 svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
4993 &env->segs[R_DS]);
eaa728ee 4994
4995 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
4996 EIP + next_eip_addend);
4997 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4998 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4999
5000 /* load the interception bitmaps so we do not need to access the
5001 vmcb in svm mode */
872929aa 5002 env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
5003 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
5004 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
5005 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
5006 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
5007 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
5008
5009 /* enable intercepts */
5010 env->hflags |= HF_SVMI_MASK;
5011
5012 env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
5013
5014 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
5015 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
5016
5017 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
5018 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
5019
5020 /* clear exit_info_2 so we behave like the real hardware */
5021 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
5022
5023 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
5024 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
5025 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
5026 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
5027 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
db620f46 5028 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
eaa728ee 5029 if (int_ctl & V_INTR_MASKING_MASK) {
5030 env->v_tpr = int_ctl & V_TPR_MASK;
5031 env->hflags2 |= HF2_VINTR_MASK;
eaa728ee 5032 if (env->eflags & IF_MASK)
db620f46 5033 env->hflags2 |= HF2_HIF_MASK;
5034 }
5035
5036 cpu_load_efer(env,
5037 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
5038 env->eflags = 0;
5039 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
5040 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5041 CC_OP = CC_OP_EFLAGS;
eaa728ee 5042
5043 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
5044 env, R_ES);
5045 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5046 env, R_CS);
5047 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5048 env, R_SS);
5049 svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5050 env, R_DS);
5051
5052 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
5053 env->eip = EIP;
5054 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
5055 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
5056 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
5057 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
5058 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
5059
5060 /* FIXME: guest state consistency checks */
5061
5062 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
5063 case TLB_CONTROL_DO_NOTHING:
5064 break;
5065 case TLB_CONTROL_FLUSH_ALL_ASID:
5066 /* FIXME: this is not 100% correct but should work for now */
5067 tlb_flush(env, 1);
5068 break;
5069 }
5070
960540b4 5071 env->hflags2 |= HF2_GIF_MASK;
eaa728ee 5072
5073 if (int_ctl & V_IRQ_MASK) {
5074 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
5075 }
5076
5077 /* maybe we need to inject an event */
5078 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
5079 if (event_inj & SVM_EVTINJ_VALID) {
5080 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
5081 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
5082 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
eaa728ee 5083
93fcfe39 5084 qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
5085 /* FIXME: need to implement valid_err */
5086 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
5087 case SVM_EVTINJ_TYPE_INTR:
5088 env->exception_index = vector;
5089 env->error_code = event_inj_err;
5090 env->exception_is_int = 0;
5091 env->exception_next_eip = -1;
93fcfe39 5092 qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
db620f46 5093 /* XXX: is it always correct ? */
e694d4e2 5094 do_interrupt_all(vector, 0, 0, 0, 1);
5095 break;
5096 case SVM_EVTINJ_TYPE_NMI:
db620f46 5097 env->exception_index = EXCP02_NMI;
5098 env->error_code = event_inj_err;
5099 env->exception_is_int = 0;
5100 env->exception_next_eip = EIP;
93fcfe39 5101 qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
1162c041 5102 cpu_loop_exit(env);
5103 break;
5104 case SVM_EVTINJ_TYPE_EXEPT:
5105 env->exception_index = vector;
5106 env->error_code = event_inj_err;
5107 env->exception_is_int = 0;
5108 env->exception_next_eip = -1;
93fcfe39 5109 qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
1162c041 5110 cpu_loop_exit(env);
5111 break;
5112 case SVM_EVTINJ_TYPE_SOFT:
5113 env->exception_index = vector;
5114 env->error_code = event_inj_err;
5115 env->exception_is_int = 1;
5116 env->exception_next_eip = EIP;
93fcfe39 5117 qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
1162c041 5118 cpu_loop_exit(env);
5119 break;
5120 }
93fcfe39 5121 qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
eaa728ee 5122 }
5123}
5124
5125void helper_vmmcall(void)
5126{
5127 helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
5128 raise_exception(EXCP06_ILLOP);
5129}
5130
914178d3 5131void helper_vmload(int aflag)
5132{
5133 target_ulong addr;
5134 helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
5135
5136 if (aflag == 2)
5137 addr = EAX;
5138 else
5139 addr = (uint32_t)EAX;
5140
93fcfe39 5141 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5142 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5143 env->segs[R_FS].base);
5144
5145 svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
5146 env, R_FS);
5147 svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
5148 env, R_GS);
5149 svm_load_seg(addr + offsetof(struct vmcb, save.tr),
5150 &env->tr);
5151 svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
5152 &env->ldt);
5153
5154#ifdef TARGET_X86_64
5155 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
5156 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
5157 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
5158 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
5159#endif
5160 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
5161 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
5162 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
5163 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
5164}
5165
914178d3 5166void helper_vmsave(int aflag)
5167{
5168 target_ulong addr;
872929aa 5169 helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
5170
5171 if (aflag == 2)
5172 addr = EAX;
5173 else
5174 addr = (uint32_t)EAX;
5175
93fcfe39 5176 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
5177 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
5178 env->segs[R_FS].base);
5179
5180 svm_save_seg(addr + offsetof(struct vmcb, save.fs),
5181 &env->segs[R_FS]);
5182 svm_save_seg(addr + offsetof(struct vmcb, save.gs),
5183 &env->segs[R_GS]);
5184 svm_save_seg(addr + offsetof(struct vmcb, save.tr),
5185 &env->tr);
5186 svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
5187 &env->ldt);
5188
5189#ifdef TARGET_X86_64
5190 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
5191 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
5192 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
5193 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
5194#endif
5195 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
5196 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
5197 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
5198 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
5199}
5200
5201void helper_stgi(void)
5202{
5203 helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
db620f46 5204 env->hflags2 |= HF2_GIF_MASK;
5205}
5206
5207void helper_clgi(void)
5208{
5209 helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
db620f46 5210 env->hflags2 &= ~HF2_GIF_MASK;
5211}
5212
5213void helper_skinit(void)
5214{
5215 helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
5216 /* XXX: not implemented */
872929aa 5217 raise_exception(EXCP06_ILLOP);
5218}
5219
914178d3 5220void helper_invlpga(int aflag)
eaa728ee 5221{
914178d3 5222 target_ulong addr;
872929aa 5223 helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5224
5225 if (aflag == 2)
5226 addr = EAX;
5227 else
5228 addr = (uint32_t)EAX;
5229
5230 /* XXX: could use the ASID to see if it is needed to do the
5231 flush */
5232 tlb_flush_page(env, addr);
5233}
5234
5235void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
5236{
5237 if (likely(!(env->hflags & HF_SVMI_MASK)))
5238 return;
5239 switch(type) {
5240 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
872929aa 5241 if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
5242 helper_vmexit(type, param);
5243 }
5244 break;
5245 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
5246 if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
5247 helper_vmexit(type, param);
5248 }
5249 break;
5250 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
5251 if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
5252 helper_vmexit(type, param);
5253 }
5254 break;
5255 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
5256 if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
5257 helper_vmexit(type, param);
5258 }
5259 break;
5260 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
5261 if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
5262 helper_vmexit(type, param);
5263 }
5264 break;
eaa728ee 5265 case SVM_EXIT_MSR:
872929aa 5266 if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
5267 /* FIXME: this should be read in at vmrun (faster this way?) */
5268 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
5269 uint32_t t0, t1;
5270 switch((uint32_t)ECX) {
5271 case 0 ... 0x1fff:
5272 t0 = (ECX * 2) % 8;
583cd3cb 5273 t1 = (ECX * 2) / 8;
5274 break;
5275 case 0xc0000000 ... 0xc0001fff:
5276 t0 = (8192 + ECX - 0xc0000000) * 2;
5277 t1 = (t0 / 8);
5278 t0 %= 8;
5279 break;
5280 case 0xc0010000 ... 0xc0011fff:
5281 t0 = (16384 + ECX - 0xc0010000) * 2;
5282 t1 = (t0 / 8);
5283 t0 %= 8;
5284 break;
5285 default:
5286 helper_vmexit(type, param);
5287 t0 = 0;
5288 t1 = 0;
5289 break;
5290 }
5291 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5292 helper_vmexit(type, param);
5293 }
5294 break;
5295 default:
872929aa 5296 if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
5297 helper_vmexit(type, param);
5298 }
5299 break;
5300 }
5301}
5302
5303void svm_check_intercept(CPUState *env1, uint32_t type)
5304{
5305 CPUState *saved_env;
5306
5307 saved_env = env;
5308 env = env1;
5309 helper_svm_check_intercept_param(type, 0);
5310 env = saved_env;
5311}
5312
5313void helper_svm_check_io(uint32_t port, uint32_t param,
5314 uint32_t next_eip_addend)
5315{
872929aa 5316 if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
5317 /* FIXME: this should be read in at vmrun (faster this way?) */
5318 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
5319 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5320 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5321 /* next EIP */
5322 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5323 env->eip + next_eip_addend);
5324 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5325 }
5326 }
5327}
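/* SVM I/O intercepts: the permission map at control.iopm_base_pa
   holds one bit per I/O port; 'mask' widens the lookup so that a 16-
   or 32-bit access is intercepted if any byte it touches is marked. */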
5328
5329/* Note: currently only 32 bits of exit_code are used */
5330void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5331{
5332 uint32_t int_ctl;
5333
93fcfe39 5334 qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5335 exit_code, exit_info_1,
5336 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5337 EIP);
5338
5339 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5340 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5341 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5342 } else {
5343 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5344 }
5345
5346 /* Save the VM state in the vmcb */
5347 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
5348 &env->segs[R_ES]);
5349 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
5350 &env->segs[R_CS]);
5351 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
5352 &env->segs[R_SS]);
5353 svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
5354 &env->segs[R_DS]);
5355
5356 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5357 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5358
5359 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5360 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5361
5362 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5363 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5364 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5365 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5366 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5367
5368 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
5369 int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
5370 int_ctl |= env->v_tpr & V_TPR_MASK;
5371 if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
5372 int_ctl |= V_IRQ_MASK;
5373 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5374
5375 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5376 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5377 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5378 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5379 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5380 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5381 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5382
5383 /* Reload the host state from vm_hsave */
db620f46 5384 env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
872929aa 5385 env->hflags &= ~HF_SVMI_MASK;
5386 env->intercept = 0;
5387 env->intercept_exceptions = 0;
5388 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
33c263df 5389 env->tsc_offset = 0;
5390
5391 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5392 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5393
5394 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5395 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5396
5397 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5398 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5399 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5400 /* we need to set the efer after the crs so the hidden flags get
5401 set properly */
5402 cpu_load_efer(env,
5403 ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
5404 env->eflags = 0;
5405 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5406 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5407 CC_OP = CC_OP_EFLAGS;
5408
5409 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
5410 env, R_ES);
5411 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
5412 env, R_CS);
5413 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
5414 env, R_SS);
5415 svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
5416 env, R_DS);
5417
5418 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5419 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5420 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5421
5422 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5423 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5424
5425 /* other setups */
5426 cpu_x86_set_cpl(env, 0);
5427 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5428 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5429
5430 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
5431 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
5432 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
5433 ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
ab5ea558 5434 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
2ed51f5b 5435
960540b4 5436 env->hflags2 &= ~HF2_GIF_MASK;
5437 /* FIXME: Resets the current ASID register to zero (host ASID). */
5438
5439 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5440
5441 /* Clears the TSC_OFFSET inside the processor. */
5442
5443 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5444 from the page table indicated the host's CR3. If the PDPEs contain
5445 illegal state, the processor causes a shutdown. */
5446
5447 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5448 env->cr[0] |= CR0_PE_MASK;
5449 env->eflags &= ~VM_MASK;
5450
5451 /* Disables all breakpoints in the host DR7 register. */
5452
5453 /* Checks the reloaded host state for consistency. */
5454
5455 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5456 host's code segment or non-canonical (in the case of long mode), a
5457 #GP fault is delivered inside the host.) */
5458
5459 /* remove any pending exception */
5460 env->exception_index = -1;
5461 env->error_code = 0;
5462 env->old_exception = -1;
5463
1162c041 5464 cpu_loop_exit(env);
5465}
5466
5467#endif
5468
5469/* MMX/SSE */
5470/* XXX: optimize by storing fptt and fptags in the static cpu state */
5471void helper_enter_mmx(void)
5472{
5473 env->fpstt = 0;
5474 *(uint32_t *)(env->fptags) = 0;
5475 *(uint32_t *)(env->fptags + 4) = 0;
5476}
5477
5478void helper_emms(void)
5479{
5480 /* set to empty state */
5481 *(uint32_t *)(env->fptags) = 0x01010101;
5482 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5483}
5484
5485/* XXX: suppress */
a7812ae4 5486void helper_movq(void *d, void *s)
eaa728ee 5487{
a7812ae4 5488 *(uint64_t *)d = *(uint64_t *)s;
5489}
5490
5491#define SHIFT 0
5492#include "ops_sse.h"
5493
5494#define SHIFT 1
5495#include "ops_sse.h"
5496
5497#define SHIFT 0
5498#include "helper_template.h"
5499#undef SHIFT
5500
5501#define SHIFT 1
5502#include "helper_template.h"
5503#undef SHIFT
5504
5505#define SHIFT 2
5506#include "helper_template.h"
5507#undef SHIFT
5508
5509#ifdef TARGET_X86_64
5510
5511#define SHIFT 3
5512#include "helper_template.h"
5513#undef SHIFT
5514
5515#endif
5516
5517/* bit operations */
5518target_ulong helper_bsf(target_ulong t0)
5519{
5520 int count;
5521 target_ulong res;
5522
5523 res = t0;
5524 count = 0;
5525 while ((res & 1) == 0) {
5526 count++;
5527 res >>= 1;
5528 }
5529 return count;
5530}
5531
31501a71 5532target_ulong helper_lzcnt(target_ulong t0, int wordsize)
5533{
5534 int count;
5535 target_ulong res, mask;
5536
5537 if (wordsize > 0 && t0 == 0) {
5538 return wordsize;
5539 }
5540 res = t0;
5541 count = TARGET_LONG_BITS - 1;
5542 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5543 while ((res & mask) == 0) {
5544 count--;
5545 res <<= 1;
5546 }
5547 if (wordsize > 0) {
5548 return wordsize - 1 - count;
5549 }
5550 return count;
5551}
5552
5553target_ulong helper_bsr(target_ulong t0)
5554{
5555 return helper_lzcnt(t0, 0);
5556}
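/* Lazy condition codes: the translator only records the last
   flag-setting operation (CC_OP) and its operands (CC_SRC/CC_DST);
   the compute_* helpers below reconstruct the full EFLAGS, or just
   CF, on demand from that record. */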
5557
5558static int compute_all_eflags(void)
5559{
5560 return CC_SRC;
5561}
5562
5563static int compute_c_eflags(void)
5564{
5565 return CC_SRC & CC_C;
5566}
5567
5568uint32_t helper_cc_compute_all(int op)
5569{
5570 switch (op) {
5571 default: /* should never happen */ return 0;
eaa728ee 5572
a7812ae4 5573 case CC_OP_EFLAGS: return compute_all_eflags();
eaa728ee 5574
5575 case CC_OP_MULB: return compute_all_mulb();
5576 case CC_OP_MULW: return compute_all_mulw();
5577 case CC_OP_MULL: return compute_all_mull();
eaa728ee 5578
5579 case CC_OP_ADDB: return compute_all_addb();
5580 case CC_OP_ADDW: return compute_all_addw();
5581 case CC_OP_ADDL: return compute_all_addl();
eaa728ee 5582
5583 case CC_OP_ADCB: return compute_all_adcb();
5584 case CC_OP_ADCW: return compute_all_adcw();
5585 case CC_OP_ADCL: return compute_all_adcl();
eaa728ee 5586
5587 case CC_OP_SUBB: return compute_all_subb();
5588 case CC_OP_SUBW: return compute_all_subw();
5589 case CC_OP_SUBL: return compute_all_subl();
eaa728ee 5590
5591 case CC_OP_SBBB: return compute_all_sbbb();
5592 case CC_OP_SBBW: return compute_all_sbbw();
5593 case CC_OP_SBBL: return compute_all_sbbl();
eaa728ee 5594
5595 case CC_OP_LOGICB: return compute_all_logicb();
5596 case CC_OP_LOGICW: return compute_all_logicw();
5597 case CC_OP_LOGICL: return compute_all_logicl();
eaa728ee 5598
5599 case CC_OP_INCB: return compute_all_incb();
5600 case CC_OP_INCW: return compute_all_incw();
5601 case CC_OP_INCL: return compute_all_incl();
eaa728ee 5602
5603 case CC_OP_DECB: return compute_all_decb();
5604 case CC_OP_DECW: return compute_all_decw();
5605 case CC_OP_DECL: return compute_all_decl();
eaa728ee 5606
5607 case CC_OP_SHLB: return compute_all_shlb();
5608 case CC_OP_SHLW: return compute_all_shlw();
5609 case CC_OP_SHLL: return compute_all_shll();
eaa728ee 5610
5611 case CC_OP_SARB: return compute_all_sarb();
5612 case CC_OP_SARW: return compute_all_sarw();
5613 case CC_OP_SARL: return compute_all_sarl();
5614
5615#ifdef TARGET_X86_64
a7812ae4 5616 case CC_OP_MULQ: return compute_all_mulq();
eaa728ee 5617
a7812ae4 5618 case CC_OP_ADDQ: return compute_all_addq();
eaa728ee 5619
a7812ae4 5620 case CC_OP_ADCQ: return compute_all_adcq();
eaa728ee 5621
a7812ae4 5622 case CC_OP_SUBQ: return compute_all_subq();
eaa728ee 5623
a7812ae4 5624 case CC_OP_SBBQ: return compute_all_sbbq();
eaa728ee 5625
a7812ae4 5626 case CC_OP_LOGICQ: return compute_all_logicq();
eaa728ee 5627
a7812ae4 5628 case CC_OP_INCQ: return compute_all_incq();
eaa728ee 5629
a7812ae4 5630 case CC_OP_DECQ: return compute_all_decq();
eaa728ee 5631
a7812ae4 5632 case CC_OP_SHLQ: return compute_all_shlq();
eaa728ee 5633
a7812ae4 5634 case CC_OP_SARQ: return compute_all_sarq();
eaa728ee 5635#endif
5636 }
5637}
5638
5639uint32_t cpu_cc_compute_all(CPUState *env1, int op)
5640{
5641 CPUState *saved_env;
5642 uint32_t ret;
5643
5644 saved_env = env;
5645 env = env1;
5646 ret = helper_cc_compute_all(op);
5647 env = saved_env;
5648 return ret;
5649}
5650
5651uint32_t helper_cc_compute_c(int op)
5652{
5653 switch (op) {
5654 default: /* should never happen */ return 0;
5655
5656 case CC_OP_EFLAGS: return compute_c_eflags();
5657
5658 case CC_OP_MULB: return compute_c_mull();
5659 case CC_OP_MULW: return compute_c_mull();
5660 case CC_OP_MULL: return compute_c_mull();
5661
5662 case CC_OP_ADDB: return compute_c_addb();
5663 case CC_OP_ADDW: return compute_c_addw();
5664 case CC_OP_ADDL: return compute_c_addl();
5665
5666 case CC_OP_ADCB: return compute_c_adcb();
5667 case CC_OP_ADCW: return compute_c_adcw();
5668 case CC_OP_ADCL: return compute_c_adcl();
5669
5670 case CC_OP_SUBB: return compute_c_subb();
5671 case CC_OP_SUBW: return compute_c_subw();
5672 case CC_OP_SUBL: return compute_c_subl();
5673
5674 case CC_OP_SBBB: return compute_c_sbbb();
5675 case CC_OP_SBBW: return compute_c_sbbw();
5676 case CC_OP_SBBL: return compute_c_sbbl();
5677
5678 case CC_OP_LOGICB: return compute_c_logicb();
5679 case CC_OP_LOGICW: return compute_c_logicw();
5680 case CC_OP_LOGICL: return compute_c_logicl();
5681
5682 case CC_OP_INCB: return compute_c_incl();
5683 case CC_OP_INCW: return compute_c_incl();
5684 case CC_OP_INCL: return compute_c_incl();
5685
5686 case CC_OP_DECB: return compute_c_incl();
5687 case CC_OP_DECW: return compute_c_incl();
5688 case CC_OP_DECL: return compute_c_incl();
eaa728ee 5689
5690 case CC_OP_SHLB: return compute_c_shlb();
5691 case CC_OP_SHLW: return compute_c_shlw();
5692 case CC_OP_SHLL: return compute_c_shll();
5693
5694 case CC_OP_SARB: return compute_c_sarl();
5695 case CC_OP_SARW: return compute_c_sarl();
5696 case CC_OP_SARL: return compute_c_sarl();
5697
5698#ifdef TARGET_X86_64
5699 case CC_OP_MULQ: return compute_c_mull();
5700
5701 case CC_OP_ADDQ: return compute_c_addq();
5702
5703 case CC_OP_ADCQ: return compute_c_adcq();
5704
5705 case CC_OP_SUBQ: return compute_c_subq();
5706
5707 case CC_OP_SBBQ: return compute_c_sbbq();
5708
5709 case CC_OP_LOGICQ: return compute_c_logicq();
5710
5711 case CC_OP_INCQ: return compute_c_incl();
5712
5713 case CC_OP_DECQ: return compute_c_incl();
5714
5715 case CC_OP_SHLQ: return compute_c_shlq();
5716
5717 case CC_OP_SARQ: return compute_c_sarl();
5718#endif
5719 }
5720}