]> git.proxmox.com Git - mirror_qemu.git/blame - target-i386/seg_helper.c
x86: Clean up includes
[mirror_qemu.git] / target-i386 / seg_helper.c
CommitLineData
/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
83dae095 20
b6a0aa05 21#include "qemu/osdep.h"
3e457172 22#include "cpu.h"
1de7afc9 23#include "qemu/log.h"
2ef6175a 24#include "exec/helper-proto.h"
f08b6170 25#include "exec/cpu_ldst.h"
eaa728ee 26
//#define DEBUG_PCALL

/* Tracing of protected-mode call/interrupt paths; compiled out unless
 * DEBUG_PCALL is defined above. */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

9220fe54
PM
38#ifdef CONFIG_USER_ONLY
39#define MEMSUFFIX _kernel
40#define DATA_SIZE 1
41#include "exec/cpu_ldst_useronly_template.h"
42
43#define DATA_SIZE 2
44#include "exec/cpu_ldst_useronly_template.h"
45
46#define DATA_SIZE 4
47#include "exec/cpu_ldst_useronly_template.h"
48
49#define DATA_SIZE 8
50#include "exec/cpu_ldst_useronly_template.h"
51#undef MEMSUFFIX
52#else
8a201bd4
PB
53#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
54#define MEMSUFFIX _kernel
55#define DATA_SIZE 1
56#include "exec/cpu_ldst_template.h"
57
58#define DATA_SIZE 2
59#include "exec/cpu_ldst_template.h"
60
61#define DATA_SIZE 4
62#include "exec/cpu_ldst_template.h"
63
64#define DATA_SIZE 8
65#include "exec/cpu_ldst_template.h"
66#undef CPU_MMU_INDEX
67#undef MEMSUFFIX
68#endif
69
eaa728ee 70/* return non zero if error */
100ec099
PD
71static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
72 uint32_t *e2_ptr, int selector,
73 uintptr_t retaddr)
eaa728ee
FB
74{
75 SegmentCache *dt;
76 int index;
77 target_ulong ptr;
78
20054ef0 79 if (selector & 0x4) {
eaa728ee 80 dt = &env->ldt;
20054ef0 81 } else {
eaa728ee 82 dt = &env->gdt;
20054ef0 83 }
eaa728ee 84 index = selector & ~7;
20054ef0 85 if ((index + 7) > dt->limit) {
eaa728ee 86 return -1;
20054ef0 87 }
eaa728ee 88 ptr = dt->base + index;
100ec099
PD
89 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
90 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
eaa728ee
FB
91 return 0;
92}
93
100ec099
PD
94static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
95 uint32_t *e2_ptr, int selector)
96{
97 return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
98}
99
eaa728ee
FB
100static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
101{
102 unsigned int limit;
20054ef0 103
eaa728ee 104 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
20054ef0 105 if (e2 & DESC_G_MASK) {
eaa728ee 106 limit = (limit << 12) | 0xfff;
20054ef0 107 }
eaa728ee
FB
108 return limit;
109}
110
/* Reassemble the 32-bit segment base that a descriptor scatters across
 * its two words: bits 15..0 in e1, bits 23..16 and 31..24 in e2. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base_15_0 = e1 >> 16;
    uint32_t base_23_16 = (e2 & 0xff) << 16;
    uint32_t base_31_24 = e2 & 0xff000000;

    return base_31_24 | base_23_16 | base_15_0;
}
115
20054ef0
BS
116static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
117 uint32_t e2)
eaa728ee
FB
118{
119 sc->base = get_seg_base(e1, e2);
120 sc->limit = get_seg_limit(e1, e2);
121 sc->flags = e2;
122}
123
124/* init the segment cache in vm86 mode. */
2999a0b2 125static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
eaa728ee
FB
126{
127 selector &= 0xffff;
b98dbc90
PB
128
129 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
130 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
131 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
eaa728ee
FB
132}
133
2999a0b2 134static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
100ec099
PD
135 uint32_t *esp_ptr, int dpl,
136 uintptr_t retaddr)
eaa728ee 137{
a47dddd7 138 X86CPU *cpu = x86_env_get_cpu(env);
eaa728ee
FB
139 int type, index, shift;
140
141#if 0
142 {
143 int i;
144 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
20054ef0 145 for (i = 0; i < env->tr.limit; i++) {
eaa728ee 146 printf("%02x ", env->tr.base[i]);
20054ef0
BS
147 if ((i & 7) == 7) {
148 printf("\n");
149 }
eaa728ee
FB
150 }
151 printf("\n");
152 }
153#endif
154
20054ef0 155 if (!(env->tr.flags & DESC_P_MASK)) {
a47dddd7 156 cpu_abort(CPU(cpu), "invalid tss");
20054ef0 157 }
eaa728ee 158 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 159 if ((type & 7) != 1) {
a47dddd7 160 cpu_abort(CPU(cpu), "invalid tss type");
20054ef0 161 }
eaa728ee
FB
162 shift = type >> 3;
163 index = (dpl * 4 + 2) << shift;
20054ef0 164 if (index + (4 << shift) - 1 > env->tr.limit) {
100ec099 165 raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
20054ef0 166 }
eaa728ee 167 if (shift == 0) {
100ec099
PD
168 *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
169 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
eaa728ee 170 } else {
100ec099
PD
171 *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
172 *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
eaa728ee
FB
173 }
174}
175
100ec099
PD
176static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
177 uintptr_t retaddr)
eaa728ee
FB
178{
179 uint32_t e1, e2;
d3b54918 180 int rpl, dpl;
eaa728ee
FB
181
182 if ((selector & 0xfffc) != 0) {
100ec099
PD
183 if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
184 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0
BS
185 }
186 if (!(e2 & DESC_S_MASK)) {
100ec099 187 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0 188 }
eaa728ee
FB
189 rpl = selector & 3;
190 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
eaa728ee 191 if (seg_reg == R_CS) {
20054ef0 192 if (!(e2 & DESC_CS_MASK)) {
100ec099 193 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0 194 }
20054ef0 195 if (dpl != rpl) {
100ec099 196 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0 197 }
eaa728ee
FB
198 } else if (seg_reg == R_SS) {
199 /* SS must be writable data */
20054ef0 200 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
100ec099 201 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0
BS
202 }
203 if (dpl != cpl || dpl != rpl) {
100ec099 204 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0 205 }
eaa728ee
FB
206 } else {
207 /* not readable code */
20054ef0 208 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
100ec099 209 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0 210 }
eaa728ee
FB
211 /* if data or non conforming code, checks the rights */
212 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
20054ef0 213 if (dpl < cpl || dpl < rpl) {
100ec099 214 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0 215 }
eaa728ee
FB
216 }
217 }
20054ef0 218 if (!(e2 & DESC_P_MASK)) {
100ec099 219 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
20054ef0 220 }
eaa728ee 221 cpu_x86_load_seg_cache(env, seg_reg, selector,
20054ef0
BS
222 get_seg_base(e1, e2),
223 get_seg_limit(e1, e2),
224 e2);
eaa728ee 225 } else {
20054ef0 226 if (seg_reg == R_SS || seg_reg == R_CS) {
100ec099 227 raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
20054ef0 228 }
eaa728ee
FB
229 }
230}
231
232#define SWITCH_TSS_JMP 0
233#define SWITCH_TSS_IRET 1
234#define SWITCH_TSS_CALL 2
235
236/* XXX: restore CPU state in registers (PowerPC case) */
100ec099
PD
237static void switch_tss_ra(CPUX86State *env, int tss_selector,
238 uint32_t e1, uint32_t e2, int source,
239 uint32_t next_eip, uintptr_t retaddr)
eaa728ee
FB
240{
241 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
242 target_ulong tss_base;
243 uint32_t new_regs[8], new_segs[6];
244 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
245 uint32_t old_eflags, eflags_mask;
246 SegmentCache *dt;
247 int index;
248 target_ulong ptr;
249
250 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0
BS
251 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
252 source);
eaa728ee
FB
253
254 /* if task gate, we read the TSS segment and we load it */
255 if (type == 5) {
20054ef0 256 if (!(e2 & DESC_P_MASK)) {
100ec099 257 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
20054ef0 258 }
eaa728ee 259 tss_selector = e1 >> 16;
20054ef0 260 if (tss_selector & 4) {
100ec099 261 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
20054ef0 262 }
100ec099
PD
263 if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
264 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
20054ef0
BS
265 }
266 if (e2 & DESC_S_MASK) {
100ec099 267 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
20054ef0 268 }
eaa728ee 269 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 270 if ((type & 7) != 1) {
100ec099 271 raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
20054ef0 272 }
eaa728ee
FB
273 }
274
20054ef0 275 if (!(e2 & DESC_P_MASK)) {
100ec099 276 raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
20054ef0 277 }
eaa728ee 278
20054ef0 279 if (type & 8) {
eaa728ee 280 tss_limit_max = 103;
20054ef0 281 } else {
eaa728ee 282 tss_limit_max = 43;
20054ef0 283 }
eaa728ee
FB
284 tss_limit = get_seg_limit(e1, e2);
285 tss_base = get_seg_base(e1, e2);
286 if ((tss_selector & 4) != 0 ||
20054ef0 287 tss_limit < tss_limit_max) {
100ec099 288 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
20054ef0 289 }
eaa728ee 290 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 291 if (old_type & 8) {
eaa728ee 292 old_tss_limit_max = 103;
20054ef0 293 } else {
eaa728ee 294 old_tss_limit_max = 43;
20054ef0 295 }
eaa728ee
FB
296
297 /* read all the registers from the new TSS */
298 if (type & 8) {
299 /* 32 bit */
100ec099
PD
300 new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
301 new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
302 new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
20054ef0 303 for (i = 0; i < 8; i++) {
100ec099
PD
304 new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
305 retaddr);
20054ef0
BS
306 }
307 for (i = 0; i < 6; i++) {
100ec099
PD
308 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
309 retaddr);
20054ef0 310 }
100ec099
PD
311 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
312 new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
eaa728ee
FB
313 } else {
314 /* 16 bit */
315 new_cr3 = 0;
100ec099
PD
316 new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
317 new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
20054ef0 318 for (i = 0; i < 8; i++) {
100ec099
PD
319 new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
320 retaddr) | 0xffff0000;
20054ef0
BS
321 }
322 for (i = 0; i < 4; i++) {
100ec099
PD
323 new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
324 retaddr);
20054ef0 325 }
100ec099 326 new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
eaa728ee
FB
327 new_segs[R_FS] = 0;
328 new_segs[R_GS] = 0;
329 new_trap = 0;
330 }
4581cbcd
BS
331 /* XXX: avoid a compiler warning, see
332 http://support.amd.com/us/Processor_TechDocs/24593.pdf
333 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
334 (void)new_trap;
eaa728ee
FB
335
336 /* NOTE: we must avoid memory exceptions during the task switch,
337 so we make dummy accesses before */
338 /* XXX: it can still fail in some cases, so a bigger hack is
339 necessary to valid the TLB after having done the accesses */
340
100ec099
PD
341 v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
342 v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
343 cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
344 cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);
eaa728ee
FB
345
346 /* clear busy bit (it is restartable) */
347 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
348 target_ulong ptr;
349 uint32_t e2;
20054ef0 350
eaa728ee 351 ptr = env->gdt.base + (env->tr.selector & ~7);
100ec099 352 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
eaa728ee 353 e2 &= ~DESC_TSS_BUSY_MASK;
100ec099 354 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
eaa728ee 355 }
997ff0d9 356 old_eflags = cpu_compute_eflags(env);
20054ef0 357 if (source == SWITCH_TSS_IRET) {
eaa728ee 358 old_eflags &= ~NT_MASK;
20054ef0 359 }
eaa728ee
FB
360
361 /* save the current state in the old TSS */
362 if (type & 8) {
363 /* 32 bit */
100ec099
PD
364 cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
365 cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
366 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
367 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
368 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
369 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
370 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
371 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
372 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
373 cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
20054ef0 374 for (i = 0; i < 6; i++) {
100ec099
PD
375 cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
376 env->segs[i].selector, retaddr);
20054ef0 377 }
eaa728ee
FB
378 } else {
379 /* 16 bit */
100ec099
PD
380 cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
381 cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
382 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
383 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
384 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
385 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
386 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
387 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
388 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
389 cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
20054ef0 390 for (i = 0; i < 4; i++) {
100ec099
PD
391 cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
392 env->segs[i].selector, retaddr);
20054ef0 393 }
eaa728ee
FB
394 }
395
396 /* now if an exception occurs, it will occurs in the next task
397 context */
398
399 if (source == SWITCH_TSS_CALL) {
100ec099 400 cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
eaa728ee
FB
401 new_eflags |= NT_MASK;
402 }
403
404 /* set busy bit */
405 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
406 target_ulong ptr;
407 uint32_t e2;
20054ef0 408
eaa728ee 409 ptr = env->gdt.base + (tss_selector & ~7);
100ec099 410 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
eaa728ee 411 e2 |= DESC_TSS_BUSY_MASK;
100ec099 412 cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
eaa728ee
FB
413 }
414
415 /* set the new CPU state */
416 /* from this point, any exception which occurs can give problems */
417 env->cr[0] |= CR0_TS_MASK;
418 env->hflags |= HF_TS_MASK;
419 env->tr.selector = tss_selector;
420 env->tr.base = tss_base;
421 env->tr.limit = tss_limit;
422 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
423
424 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
425 cpu_x86_update_cr3(env, new_cr3);
426 }
427
428 /* load all registers without an exception, then reload them with
429 possible exception */
430 env->eip = new_eip;
431 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
432 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
20054ef0 433 if (!(type & 8)) {
eaa728ee 434 eflags_mask &= 0xffff;
20054ef0 435 }
997ff0d9 436 cpu_load_eflags(env, new_eflags, eflags_mask);
20054ef0 437 /* XXX: what to do in 16 bit case? */
4b34e3ad 438 env->regs[R_EAX] = new_regs[0];
a4165610 439 env->regs[R_ECX] = new_regs[1];
00f5e6f2 440 env->regs[R_EDX] = new_regs[2];
70b51365 441 env->regs[R_EBX] = new_regs[3];
08b3ded6 442 env->regs[R_ESP] = new_regs[4];
c12dddd7 443 env->regs[R_EBP] = new_regs[5];
78c3c6d3 444 env->regs[R_ESI] = new_regs[6];
cf75c597 445 env->regs[R_EDI] = new_regs[7];
eaa728ee 446 if (new_eflags & VM_MASK) {
20054ef0 447 for (i = 0; i < 6; i++) {
2999a0b2 448 load_seg_vm(env, i, new_segs[i]);
20054ef0 449 }
eaa728ee 450 } else {
eaa728ee 451 /* first just selectors as the rest may trigger exceptions */
20054ef0 452 for (i = 0; i < 6; i++) {
eaa728ee 453 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
20054ef0 454 }
eaa728ee
FB
455 }
456
457 env->ldt.selector = new_ldt & ~4;
458 env->ldt.base = 0;
459 env->ldt.limit = 0;
460 env->ldt.flags = 0;
461
462 /* load the LDT */
20054ef0 463 if (new_ldt & 4) {
100ec099 464 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
20054ef0 465 }
eaa728ee
FB
466
467 if ((new_ldt & 0xfffc) != 0) {
468 dt = &env->gdt;
469 index = new_ldt & ~7;
20054ef0 470 if ((index + 7) > dt->limit) {
100ec099 471 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
20054ef0 472 }
eaa728ee 473 ptr = dt->base + index;
100ec099
PD
474 e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
475 e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
20054ef0 476 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
100ec099 477 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
20054ef0
BS
478 }
479 if (!(e2 & DESC_P_MASK)) {
100ec099 480 raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
20054ef0 481 }
eaa728ee
FB
482 load_seg_cache_raw_dt(&env->ldt, e1, e2);
483 }
484
485 /* load the segments */
486 if (!(new_eflags & VM_MASK)) {
d3b54918 487 int cpl = new_segs[R_CS] & 3;
100ec099
PD
488 tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
489 tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
490 tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
491 tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
492 tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
493 tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
eaa728ee
FB
494 }
495
a78d0eab 496 /* check that env->eip is in the CS segment limits */
eaa728ee 497 if (new_eip > env->segs[R_CS].limit) {
20054ef0 498 /* XXX: different exception if CALL? */
100ec099 499 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
eaa728ee 500 }
01df040b
AL
501
502#ifndef CONFIG_USER_ONLY
503 /* reset local breakpoints */
428065ce 504 if (env->dr[7] & DR7_LOCAL_BP_MASK) {
93d00d0f 505 cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
01df040b
AL
506 }
507#endif
eaa728ee
FB
508}
509
100ec099
PD
510static void switch_tss(CPUX86State *env, int tss_selector,
511 uint32_t e1, uint32_t e2, int source,
512 uint32_t next_eip)
513{
514 switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
515}
516
eaa728ee
FB
517static inline unsigned int get_sp_mask(unsigned int e2)
518{
20054ef0 519 if (e2 & DESC_B_MASK) {
eaa728ee 520 return 0xffffffff;
20054ef0 521 } else {
eaa728ee 522 return 0xffff;
20054ef0 523 }
eaa728ee
FB
524}
525
/* Return 1 when exception INTNO pushes an error code on the stack
 * (#DF, #TS, #NP, #SS, #GP, #PF, #AC), 0 otherwise. */
static int exception_has_error_code(int intno)
{
    return intno == 8 || (intno >= 10 && intno <= 14) || intno == 17;
}

540
/* Update ESP/RSP under SP_MASK.  On 64-bit builds the three common mask
 * values get dedicated branches so the compiler can fold them. */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

560
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* Stack push/pop helpers used by the interrupt/call-gate paths; the _RA
 * variants carry a host return address for exception unwinding. */
/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                                \
        sp -= 2;                                                     \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                                \
        sp -= 4;                                                     \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                           \
    {                                                                \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                     \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                           \
    {                                                                \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                     \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

594
eaa728ee 595/* protected mode interrupt */
2999a0b2
BS
596static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
597 int error_code, unsigned int next_eip,
598 int is_hw)
eaa728ee
FB
599{
600 SegmentCache *dt;
601 target_ulong ptr, ssp;
602 int type, dpl, selector, ss_dpl, cpl;
603 int has_error_code, new_stack, shift;
1c918eba 604 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
eaa728ee 605 uint32_t old_eip, sp_mask;
87446327 606 int vm86 = env->eflags & VM_MASK;
eaa728ee 607
eaa728ee 608 has_error_code = 0;
20054ef0
BS
609 if (!is_int && !is_hw) {
610 has_error_code = exception_has_error_code(intno);
611 }
612 if (is_int) {
eaa728ee 613 old_eip = next_eip;
20054ef0 614 } else {
eaa728ee 615 old_eip = env->eip;
20054ef0 616 }
eaa728ee
FB
617
618 dt = &env->idt;
20054ef0 619 if (intno * 8 + 7 > dt->limit) {
77b2bc2c 620 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 621 }
eaa728ee 622 ptr = dt->base + intno * 8;
329e607d
BS
623 e1 = cpu_ldl_kernel(env, ptr);
624 e2 = cpu_ldl_kernel(env, ptr + 4);
eaa728ee
FB
625 /* check gate type */
626 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
20054ef0 627 switch (type) {
eaa728ee
FB
628 case 5: /* task gate */
629 /* must do that check here to return the correct error code */
20054ef0 630 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 631 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
20054ef0 632 }
2999a0b2 633 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
eaa728ee
FB
634 if (has_error_code) {
635 int type;
636 uint32_t mask;
20054ef0 637
eaa728ee
FB
638 /* push the error code */
639 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
640 shift = type >> 3;
20054ef0 641 if (env->segs[R_SS].flags & DESC_B_MASK) {
eaa728ee 642 mask = 0xffffffff;
20054ef0 643 } else {
eaa728ee 644 mask = 0xffff;
20054ef0 645 }
08b3ded6 646 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
eaa728ee 647 ssp = env->segs[R_SS].base + esp;
20054ef0 648 if (shift) {
329e607d 649 cpu_stl_kernel(env, ssp, error_code);
20054ef0 650 } else {
329e607d 651 cpu_stw_kernel(env, ssp, error_code);
20054ef0 652 }
eaa728ee
FB
653 SET_ESP(esp, mask);
654 }
655 return;
656 case 6: /* 286 interrupt gate */
657 case 7: /* 286 trap gate */
658 case 14: /* 386 interrupt gate */
659 case 15: /* 386 trap gate */
660 break;
661 default:
77b2bc2c 662 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
eaa728ee
FB
663 break;
664 }
665 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
666 cpl = env->hflags & HF_CPL_MASK;
1235fc06 667 /* check privilege if software int */
20054ef0 668 if (is_int && dpl < cpl) {
77b2bc2c 669 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 670 }
eaa728ee 671 /* check valid bit */
20054ef0 672 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 673 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
20054ef0 674 }
eaa728ee
FB
675 selector = e1 >> 16;
676 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
20054ef0 677 if ((selector & 0xfffc) == 0) {
77b2bc2c 678 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 679 }
2999a0b2 680 if (load_segment(env, &e1, &e2, selector) != 0) {
77b2bc2c 681 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
682 }
683 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 684 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 685 }
eaa728ee 686 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 687 if (dpl > cpl) {
77b2bc2c 688 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
689 }
690 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 691 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 692 }
eaa728ee
FB
693 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
694 /* to inner privilege */
100ec099 695 get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
20054ef0 696 if ((ss & 0xfffc) == 0) {
77b2bc2c 697 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
698 }
699 if ((ss & 3) != dpl) {
77b2bc2c 700 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 701 }
2999a0b2 702 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
77b2bc2c 703 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 704 }
eaa728ee 705 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 706 if (ss_dpl != dpl) {
77b2bc2c 707 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 708 }
eaa728ee
FB
709 if (!(ss_e2 & DESC_S_MASK) ||
710 (ss_e2 & DESC_CS_MASK) ||
20054ef0 711 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 712 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
713 }
714 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 715 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 716 }
eaa728ee
FB
717 new_stack = 1;
718 sp_mask = get_sp_mask(ss_e2);
719 ssp = get_seg_base(ss_e1, ss_e2);
720 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
721 /* to same privilege */
87446327 722 if (vm86) {
77b2bc2c 723 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 724 }
eaa728ee
FB
725 new_stack = 0;
726 sp_mask = get_sp_mask(env->segs[R_SS].flags);
727 ssp = env->segs[R_SS].base;
08b3ded6 728 esp = env->regs[R_ESP];
eaa728ee
FB
729 dpl = cpl;
730 } else {
77b2bc2c 731 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
eaa728ee
FB
732 new_stack = 0; /* avoid warning */
733 sp_mask = 0; /* avoid warning */
734 ssp = 0; /* avoid warning */
735 esp = 0; /* avoid warning */
736 }
737
738 shift = type >> 3;
739
740#if 0
741 /* XXX: check that enough room is available */
742 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
87446327 743 if (vm86) {
eaa728ee 744 push_size += 8;
20054ef0 745 }
eaa728ee
FB
746 push_size <<= shift;
747#endif
748 if (shift == 1) {
749 if (new_stack) {
87446327 750 if (vm86) {
eaa728ee
FB
751 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
752 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
753 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
754 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
755 }
756 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
08b3ded6 757 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
eaa728ee 758 }
997ff0d9 759 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
eaa728ee
FB
760 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
761 PUSHL(ssp, esp, sp_mask, old_eip);
762 if (has_error_code) {
763 PUSHL(ssp, esp, sp_mask, error_code);
764 }
765 } else {
766 if (new_stack) {
87446327 767 if (vm86) {
eaa728ee
FB
768 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
769 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
770 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
771 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
772 }
773 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
08b3ded6 774 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
eaa728ee 775 }
997ff0d9 776 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
eaa728ee
FB
777 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
778 PUSHW(ssp, esp, sp_mask, old_eip);
779 if (has_error_code) {
780 PUSHW(ssp, esp, sp_mask, error_code);
781 }
782 }
783
fd460606
KC
784 /* interrupt gate clear IF mask */
785 if ((type & 1) == 0) {
786 env->eflags &= ~IF_MASK;
787 }
788 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
789
eaa728ee 790 if (new_stack) {
87446327 791 if (vm86) {
eaa728ee
FB
792 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
793 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
794 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
796 }
797 ss = (ss & ~3) | dpl;
798 cpu_x86_load_seg_cache(env, R_SS, ss,
799 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
800 }
801 SET_ESP(esp, sp_mask);
802
803 selector = (selector & ~3) | dpl;
804 cpu_x86_load_seg_cache(env, R_CS, selector,
805 get_seg_base(e1, e2),
806 get_seg_limit(e1, e2),
807 e2);
eaa728ee 808 env->eip = offset;
eaa728ee
FB
809}
810
811#ifdef TARGET_X86_64
812
100ec099 813#define PUSHQ_RA(sp, val, ra) \
20054ef0
BS
814 { \
815 sp -= 8; \
100ec099 816 cpu_stq_kernel_ra(env, sp, (val), ra); \
20054ef0 817 }
eaa728ee 818
100ec099 819#define POPQ_RA(sp, val, ra) \
20054ef0 820 { \
100ec099 821 val = cpu_ldq_kernel_ra(env, sp, ra); \
20054ef0
BS
822 sp += 8; \
823 }
eaa728ee 824
100ec099
PD
825#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
826#define POPQ(sp, val) POPQ_RA(sp, val, 0)
827
2999a0b2 828static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
eaa728ee 829{
a47dddd7 830 X86CPU *cpu = x86_env_get_cpu(env);
eaa728ee
FB
831 int index;
832
833#if 0
834 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
835 env->tr.base, env->tr.limit);
836#endif
837
20054ef0 838 if (!(env->tr.flags & DESC_P_MASK)) {
a47dddd7 839 cpu_abort(CPU(cpu), "invalid tss");
20054ef0 840 }
eaa728ee 841 index = 8 * level + 4;
20054ef0 842 if ((index + 7) > env->tr.limit) {
77b2bc2c 843 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
20054ef0 844 }
329e607d 845 return cpu_ldq_kernel(env, env->tr.base + index);
eaa728ee
FB
846}
847
/* 64 bit interrupt */
/*
 * Deliver an interrupt/exception in long mode (IA-32e).
 *
 * intno       - vector number
 * is_int      - true when raised by an INT instruction (software int)
 * error_code  - pushed on the new stack when the vector defines one
 * next_eip    - EIP after the INT instruction (only used if is_int)
 * is_hw       - true for external hardware interrupts
 *
 * The sequence of descriptor checks and the #GP/#NP error codes follow
 * the architectural ordering; do not reorder the raise_exception_err()
 * calls.
 */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    /* only genuine exceptions (not INT n, not hw irq) push an error code */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    /* 64-bit IDT entries are 16 bytes */
    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    /* 64-bit gate: offset spans e1[15:0], e2[31:16] and e3 (upper 32) */
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    /* validate the target code segment */
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    /* handler must be a 64-bit (L=1, D=0) code segment */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        /* IST levels live in TSS slots 4..10, hence ist + 3 */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    esp &= ~0xfLL; /* align stack */

    /* build the 64-bit interrupt stack frame (SS:RSP always pushed) */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        /* long mode loads a NULL SS with RPL = new CPL */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
968#endif
969
d9957a8b 970#ifdef TARGET_X86_64
eaa728ee 971#if defined(CONFIG_USER_ONLY)
2999a0b2 972void helper_syscall(CPUX86State *env, int next_eip_addend)
eaa728ee 973{
27103424
AF
974 CPUState *cs = CPU(x86_env_get_cpu(env));
975
976 cs->exception_index = EXCP_SYSCALL;
eaa728ee 977 env->exception_next_eip = env->eip + next_eip_addend;
5638d180 978 cpu_loop_exit(cs);
eaa728ee
FB
979}
980#else
/*
 * System-emulation SYSCALL.  Loads CS/SS from MSR_STAR[47:32], saves
 * the return RIP in RCX (and RFLAGS in R11 in long mode), masks flags
 * per MSR_SYSCALL_MASK, and jumps to LSTAR/CSTAR (long mode) or the
 * low 32 bits of STAR (legacy mode).  #UD if EFER.SCE is clear.
 */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* long mode: return RIP in RCX, RFLAGS image in R11 */
        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        /* mask flags per MSR_SYSCALL_MASK (fmask) */
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        /* SYSCALL loads fixed flat descriptors; the GDT is not read */
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* 64-bit callers use LSTAR, compatibility-mode callers CSTAR */
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        /* legacy mode: return EIP in ECX, target is STAR[31:0] */
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1032#endif
d9957a8b 1033#endif
eaa728ee 1034
d9957a8b 1035#ifdef TARGET_X86_64
/*
 * SYSRET: return from a SYSCALL handler to user mode.
 *
 * dflag == 2 selects a 64-bit return (REX.W SYSRET); otherwise the
 * return is to 32-bit code.  CS/SS selectors are derived from
 * MSR_STAR[63:48] with RPL forced to 3.  #UD if EFER.SCE is clear,
 * #GP(0) outside protected mode or when CPL != 0.
 */
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        /* restore the RFLAGS image saved in R11 by SYSCALL */
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            /* 64-bit return: CS selector is STAR[63:48] + 16 */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            /* 32-bit return under long mode */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        /* legacy-mode SYSRET only restores IF; other flags untouched */
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
d9957a8b 1088#endif
eaa728ee
FB
1089
/* real mode interrupt */
/*
 * Deliver an interrupt in real mode: read the 4-byte IVT entry,
 * push FLAGS/CS/IP (16-bit) on the current stack, load CS:IP from
 * the vector and clear IF/TF/AC/RF.  error_code is unused in real
 * mode and is accepted only for signature symmetry with the
 * protected/long-mode paths.
 */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1128
e694d4e2 1129#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
/*
 * User-mode-only interrupt delivery.  Performs just the DPL-vs-CPL
 * privilege check for software interrupts (so INT n from CPL 3 into a
 * DPL<3 gate still raises #GP), then leaves the rest to the host-side
 * emulation loop.  error_code is unused here.
 */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    /* IDT entries are 16 bytes in long mode, 8 otherwise */
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}
1162
e694d4e2
BS
1163#else
1164
/*
 * SVM: record the interrupt/exception being delivered in the VMCB
 * EVENTINJ field so a nested hypervisor sees the pending event.
 * Writes only when EVENTINJ is not already valid (an injection set up
 * by the hypervisor takes precedence).  'rm' is true when delivering
 * in real mode, where no error code exists to store.
 */
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        /* store the error code first, then publish the valid entry */
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
00ea18d1 1192#endif
2ed51f5b 1193
eaa728ee
FB
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
/*
 * Central dispatcher: logs the event (CPU_LOG_INT), records it in the
 * SVM VMCB when running under a guest hypervisor, then hands off to
 * the long-mode, protected-mode or real-mode delivery routine.  After
 * delivery the VMCB EVENTINJ valid bit is cleared again.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            /* vector 0x0e is #PF: CR2 holds the faulting address */
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        /* running as an SVM guest: mirror the event into the VMCB */
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    /* event delivered: clear the VMCB injection valid bit */
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
1275
97a8ea5a 1276void x86_cpu_do_interrupt(CPUState *cs)
e694d4e2 1277{
97a8ea5a
AF
1278 X86CPU *cpu = X86_CPU(cs);
1279 CPUX86State *env = &cpu->env;
1280
e694d4e2
BS
1281#if defined(CONFIG_USER_ONLY)
1282 /* if user mode only, we simulate a fake exception
1283 which will be handled outside the cpu execution
1284 loop */
27103424 1285 do_interrupt_user(env, cs->exception_index,
e694d4e2
BS
1286 env->exception_is_int,
1287 env->error_code,
1288 env->exception_next_eip);
1289 /* successfully delivered */
1290 env->old_exception = -1;
1291#else
1292 /* simulate a real cpu exception. On i386, it can
1293 trigger new exceptions, but we do not handle
1294 double or triple faults yet. */
27103424 1295 do_interrupt_all(cpu, cs->exception_index,
e694d4e2
BS
1296 env->exception_is_int,
1297 env->error_code,
1298 env->exception_next_eip, 0);
1299 /* successfully delivered */
1300 env->old_exception = -1;
1301#endif
e694d4e2
BS
1302}
1303
2999a0b2 1304void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
e694d4e2 1305{
ca4c810a 1306 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
e694d4e2
BS
1307}
1308
/*
 * CPUClass::cpu_exec_interrupt hook: examine interrupt_request and
 * deliver at most one pending event per call.  The if/else ladder
 * encodes the priority order (APIC poll, SIPI, SMI, NMI, MCE, hard
 * IRQ, virtual IRQ); do not reorder.  Returns true when an event was
 * taken, so the execution loop restarts instead of chaining TBs.
 */
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        /* Don't process multiple interrupt requests in a single call.
           This is required to make icount-driven execution deterministic. */
        return true;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
    } else if (env->hflags2 & HF2_GIF_MASK) {
        /* GIF clear (SVM) blocks all of the following sources */
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            /* NMIs are blocked until the next IRET */
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;
            /* FIXME: this should respect TPR */
            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
            intno = x86_ldl_phys(cs, env->vm_vmcb
                                 + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}
1380
/*
 * ENTER with a non-zero nesting level (legacy/protected mode): copy
 * 'level - 1' frame pointers from the old frame down to the new one,
 * then push t1 (the new frame pointer value).  data32 selects 32-bit
 * vs 16-bit stack element size.  Callers pass level in 1..31 (the
 * instruction masks it); level == 0 is handled in the translator.
 * Guest ESP/EBP are not written here - the caller commits them.
 */
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data_ra(env, ssp + (esp & esp_mask),
                            cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
                                            GETPC()),
                            GETPC());
        }
        esp -= 4;
        cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, ssp + (esp & esp_mask),
                            cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
                                             GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
    }
}
1419
1420#ifdef TARGET_X86_64
/*
 * Long-mode counterpart of helper_enter_level: flat address space, no
 * SS base or mask.  data64 selects 64-bit elements; otherwise 16-bit
 * (operand-size-prefixed ENTER).  Guest RSP/RBP are committed by the
 * caller, not here.
 */
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 8;
        cpu_stq_data_ra(env, esp, t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, esp, t1, GETPC());
    }
}
1453#endif
1454
/*
 * LLDT: load the Local Descriptor Table register from a GDT selector.
 * A null selector yields an empty (unusable) LDT.  Otherwise the
 * selector must reference a present LDT descriptor (type 2) in the
 * GDT; long mode uses the 16-byte system-descriptor format with a
 * 64-bit base.  Raises #GP/#NP with the selector as error code.
 */
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* TI bit set (LDT-relative selector) is invalid for LLDT */
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            /* third descriptor word carries base bits 63:32 */
            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
1508
/*
 * LTR: load the Task Register from a GDT selector.  A null selector
 * yields an empty TR.  The descriptor must be an available TSS (type
 * 1 = 16-bit, 9 = 32/64-bit); on success the busy bit is set in the
 * in-memory descriptor.  Long mode uses the 16-byte format; its upper
 * type nibble must be zero.  Raises #GP/#NP with the selector.
 */
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* TSS descriptors must live in the GDT, not the LDT */
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an available (non-busy) TSS system descriptor */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            /* upper half of a 64-bit TSS descriptor must have type 0 */
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the descriptor table */
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
1571
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/*
 * Load a data/stack segment register (MOV seg, POP seg).  Performs
 * the architectural checks: null selector rules (SS may not be null
 * except at CPL 3 in 64-bit mode), descriptor-table limit, S-type,
 * writability (SS) or readability (others), privilege, and presence.
 * Sets the accessed bit in the descriptor on first use.  Raises
 * #GP/#SS/#NP with the selector as error code.
 */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* TI bit selects LDT vs GDT */
        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* a non-present stack segment is #SS, not #NP */
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
1659
/* protected mode jump */
/*
 * Far JMP in protected mode.  The target may be a code segment
 * (conforming or non-conforming, with the usual DPL/RPL/CPL rules),
 * a TSS or task gate (task switch), or a call gate (which redirects
 * to the code segment stored in the gate).  CPL never changes on a
 * far jump.  next_eip is the address after the JMP, used as the
 * outgoing EIP for task switches.
 */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        /* no limit check for 64-bit code segments */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            /* redirect to the code segment/offset held in the gate */
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
1763
1764/* real mode call */
2999a0b2 1765void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
eaa728ee
FB
1766 int shift, int next_eip)
1767{
1768 int new_eip;
1769 uint32_t esp, esp_mask;
1770 target_ulong ssp;
1771
1772 new_eip = new_eip1;
08b3ded6 1773 esp = env->regs[R_ESP];
eaa728ee
FB
1774 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1775 ssp = env->segs[R_SS].base;
1776 if (shift) {
100ec099
PD
1777 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1778 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
eaa728ee 1779 } else {
100ec099
PD
1780 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1781 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
eaa728ee
FB
1782 }
1783
1784 SET_ESP(esp, esp_mask);
1785 env->eip = new_eip;
1786 env->segs[R_CS].selector = new_cs;
1787 env->segs[R_CS].base = (new_cs << 4);
1788}
1789
/* Protected mode far call (CALL ptr16:16/ptr16:32/m16:64).
 *
 * 'new_cs' is the target selector, 'new_eip' the target offset, 'shift'
 * encodes the operand size (0 = 16 bit, 1 = 32 bit, 2 = 64 bit) and
 * 'next_eip' is the return address to push.  The selector may name a
 * code segment, a TSS/task gate (task switch) or a call gate; call
 * gates through to a more privileged segment switch stacks and copy
 * 'param_count' parameters from the old stack.  All failed checks
 * raise #GP/#NP/#TS with the offending selector as error code.
 */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp;

    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    /* a null selector is never a valid call target */
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment (no gate) */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment: may be called from equal or
               lower privilege, CPL is kept */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment: RPL and DPL must match CPL */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        /* segment-not-present raises #NP, not #GP */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case: push 8-byte CS and RIP, no limit check */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push return CS:IP at the call's operand size */
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            /* the target offset must be within the segment limit */
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* system descriptor: check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* perform a full task switch and return directly */
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        /* gate type's bit 3 distinguishes 16-bit (4) from 32-bit (12)
           gates; reuse 'shift' as the push width from here on */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        /* decode the gate: target selector, target offset, number of
           parameters to copy to the new stack */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        /* e1/e2 are reloaded with the gate's target code descriptor */
        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege: fetch the new SS:ESP for the target
               privilege level from the current TSS */
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            /* validate the new stack segment (#TS on failure) */
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            /* switch to the new stack, then push the old SS:ESP and
               copy param_count parameters from the old stack */
            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege: stay on the current stack */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        /* push the return CS:IP on the (possibly new) stack */
        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        /* the new CPL is the gate target's DPL */
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
2015
2016/* real and vm86 mode iret */
2999a0b2 2017void helper_iret_real(CPUX86State *env, int shift)
eaa728ee
FB
2018{
2019 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2020 target_ulong ssp;
2021 int eflags_mask;
2022
20054ef0 2023 sp_mask = 0xffff; /* XXXX: use SS segment size? */
08b3ded6 2024 sp = env->regs[R_ESP];
eaa728ee
FB
2025 ssp = env->segs[R_SS].base;
2026 if (shift == 1) {
2027 /* 32 bits */
100ec099
PD
2028 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2029 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
eaa728ee 2030 new_cs &= 0xffff;
100ec099 2031 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
eaa728ee
FB
2032 } else {
2033 /* 16 bits */
100ec099
PD
2034 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2035 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2036 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
eaa728ee 2037 }
08b3ded6 2038 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2039 env->segs[R_CS].selector = new_cs;
2040 env->segs[R_CS].base = (new_cs << 4);
eaa728ee 2041 env->eip = new_eip;
20054ef0
BS
2042 if (env->eflags & VM_MASK) {
2043 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2044 NT_MASK;
2045 } else {
2046 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2047 RF_MASK | NT_MASK;
2048 }
2049 if (shift == 0) {
eaa728ee 2050 eflags_mask &= 0xffff;
20054ef0 2051 }
997ff0d9 2052 cpu_load_eflags(env, new_eflags, eflags_mask);
db620f46 2053 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2054}
2055
2999a0b2 2056static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
eaa728ee
FB
2057{
2058 int dpl;
2059 uint32_t e2;
2060
2061 /* XXX: on x86_64, we do not want to nullify FS and GS because
2062 they may still contain a valid base. I would be interested to
2063 know how a real x86_64 CPU behaves */
2064 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 2065 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2066 return;
20054ef0 2067 }
eaa728ee
FB
2068
2069 e2 = env->segs[seg_reg].flags;
2070 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2071 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2072 /* data or non conforming code segment */
2073 if (dpl < cpl) {
2074 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2075 }
2076 }
2077}
2078
/* Protected mode far return / iret common path.
 *
 * 'shift' is the operand size (0 = 16 bit, 1 = 32 bit, 2 = 64 bit),
 * 'is_iret' selects IRET semantics (EFLAGS is also popped and, for a
 * 32-bit iret, a VM flag in the popped EFLAGS switches to vm86 mode),
 * 'addend' is the RET imm16 byte count to discard after the far
 * pointer, and 'retaddr' is the host return address used to unwind on
 * a fault.  Handles both same-privilege and outer-privilege returns;
 * the latter also pops SS:ESP and re-validates the data segment
 * registers against the new CPL.
 */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        /* 64-bit stack: no ESP wrapping */
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    /* pop the return far pointer (and EFLAGS for iret) at the
       requested operand size */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                /* iret with VM set in the popped EFLAGS returns to
                   vm86 mode (only possible from CPL 0) */
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    /* validate the return code segment selector and descriptor */
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* a return can never go to a more privileged (numerically lower)
       level */
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        /* conforming: DPL must be <= RPL */
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        /* non conforming: DPL must equal RPL */
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    /* discard the RET imm16 argument bytes */
    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level: also pop SS:ESP */
        #ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                /* load a flat writable stack descriptor at the new RPL */
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            /* validate the popped stack segment selector/descriptor */
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            /* only CPL 0 may change IOPL */
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            /* IF may only change when privileged enough */
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* 32-bit iret to vm86: pop ESP, SS and the four data segment
       selectors from the CPL-0 stack */
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
2299
2999a0b2 2300void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
eaa728ee
FB
2301{
2302 int tss_selector, type;
2303 uint32_t e1, e2;
2304
2305 /* specific case for TSS */
2306 if (env->eflags & NT_MASK) {
2307#ifdef TARGET_X86_64
20054ef0 2308 if (env->hflags & HF_LMA_MASK) {
100ec099 2309 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 2310 }
eaa728ee 2311#endif
100ec099 2312 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
20054ef0 2313 if (tss_selector & 4) {
100ec099 2314 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2315 }
100ec099
PD
2316 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2317 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2318 }
eaa728ee
FB
2319 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2320 /* NOTE: we check both segment and busy TSS */
20054ef0 2321 if (type != 3) {
100ec099 2322 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2323 }
100ec099 2324 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
eaa728ee 2325 } else {
100ec099 2326 helper_ret_protected(env, shift, 1, 0, GETPC());
eaa728ee 2327 }
db620f46 2328 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2329}
2330
2999a0b2 2331void helper_lret_protected(CPUX86State *env, int shift, int addend)
eaa728ee 2332{
100ec099 2333 helper_ret_protected(env, shift, 0, addend, GETPC());
eaa728ee
FB
2334}
2335
2999a0b2 2336void helper_sysenter(CPUX86State *env)
eaa728ee
FB
2337{
2338 if (env->sysenter_cs == 0) {
100ec099 2339 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
eaa728ee
FB
2340 }
2341 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2436b61a
AZ
2342
2343#ifdef TARGET_X86_64
2344 if (env->hflags & HF_LMA_MASK) {
2345 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2346 0, 0xffffffff,
2347 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2348 DESC_S_MASK |
20054ef0
BS
2349 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2350 DESC_L_MASK);
2436b61a
AZ
2351 } else
2352#endif
2353 {
2354 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2355 0, 0xffffffff,
2356 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2357 DESC_S_MASK |
2358 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2359 }
eaa728ee
FB
2360 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2361 0, 0xffffffff,
2362 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2363 DESC_S_MASK |
2364 DESC_W_MASK | DESC_A_MASK);
08b3ded6 2365 env->regs[R_ESP] = env->sysenter_esp;
a78d0eab 2366 env->eip = env->sysenter_eip;
eaa728ee
FB
2367}
2368
2999a0b2 2369void helper_sysexit(CPUX86State *env, int dflag)
eaa728ee
FB
2370{
2371 int cpl;
2372
2373 cpl = env->hflags & HF_CPL_MASK;
2374 if (env->sysenter_cs == 0 || cpl != 0) {
100ec099 2375 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
eaa728ee 2376 }
2436b61a
AZ
2377#ifdef TARGET_X86_64
2378 if (dflag == 2) {
20054ef0
BS
2379 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2380 3, 0, 0xffffffff,
2436b61a
AZ
2381 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2382 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
20054ef0
BS
2383 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2384 DESC_L_MASK);
2385 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2386 3, 0, 0xffffffff,
2436b61a
AZ
2387 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2388 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2389 DESC_W_MASK | DESC_A_MASK);
2390 } else
2391#endif
2392 {
20054ef0
BS
2393 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2394 3, 0, 0xffffffff,
2436b61a
AZ
2395 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2396 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2397 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
20054ef0
BS
2398 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2399 3, 0, 0xffffffff,
2436b61a
AZ
2400 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2401 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2402 DESC_W_MASK | DESC_A_MASK);
2403 }
08b3ded6 2404 env->regs[R_ESP] = env->regs[R_ECX];
a78d0eab 2405 env->eip = env->regs[R_EDX];
eaa728ee
FB
2406}
2407
2999a0b2 2408target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2409{
2410 unsigned int limit;
2411 uint32_t e1, e2, eflags, selector;
2412 int rpl, dpl, cpl, type;
2413
2414 selector = selector1 & 0xffff;
f0967a1a 2415 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2416 if ((selector & 0xfffc) == 0) {
dc1ded53 2417 goto fail;
20054ef0 2418 }
100ec099 2419 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2420 goto fail;
20054ef0 2421 }
eaa728ee
FB
2422 rpl = selector & 3;
2423 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2424 cpl = env->hflags & HF_CPL_MASK;
2425 if (e2 & DESC_S_MASK) {
2426 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2427 /* conforming */
2428 } else {
20054ef0 2429 if (dpl < cpl || dpl < rpl) {
eaa728ee 2430 goto fail;
20054ef0 2431 }
eaa728ee
FB
2432 }
2433 } else {
2434 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2435 switch (type) {
eaa728ee
FB
2436 case 1:
2437 case 2:
2438 case 3:
2439 case 9:
2440 case 11:
2441 break;
2442 default:
2443 goto fail;
2444 }
2445 if (dpl < cpl || dpl < rpl) {
2446 fail:
2447 CC_SRC = eflags & ~CC_Z;
2448 return 0;
2449 }
2450 }
2451 limit = get_seg_limit(e1, e2);
2452 CC_SRC = eflags | CC_Z;
2453 return limit;
2454}
2455
2999a0b2 2456target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2457{
2458 uint32_t e1, e2, eflags, selector;
2459 int rpl, dpl, cpl, type;
2460
2461 selector = selector1 & 0xffff;
f0967a1a 2462 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2463 if ((selector & 0xfffc) == 0) {
eaa728ee 2464 goto fail;
20054ef0 2465 }
100ec099 2466 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2467 goto fail;
20054ef0 2468 }
eaa728ee
FB
2469 rpl = selector & 3;
2470 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2471 cpl = env->hflags & HF_CPL_MASK;
2472 if (e2 & DESC_S_MASK) {
2473 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2474 /* conforming */
2475 } else {
20054ef0 2476 if (dpl < cpl || dpl < rpl) {
eaa728ee 2477 goto fail;
20054ef0 2478 }
eaa728ee
FB
2479 }
2480 } else {
2481 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2482 switch (type) {
eaa728ee
FB
2483 case 1:
2484 case 2:
2485 case 3:
2486 case 4:
2487 case 5:
2488 case 9:
2489 case 11:
2490 case 12:
2491 break;
2492 default:
2493 goto fail;
2494 }
2495 if (dpl < cpl || dpl < rpl) {
2496 fail:
2497 CC_SRC = eflags & ~CC_Z;
2498 return 0;
2499 }
2500 }
2501 CC_SRC = eflags | CC_Z;
2502 return e2 & 0x00f0ff00;
2503}
2504
2999a0b2 2505void helper_verr(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2506{
2507 uint32_t e1, e2, eflags, selector;
2508 int rpl, dpl, cpl;
2509
2510 selector = selector1 & 0xffff;
f0967a1a 2511 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2512 if ((selector & 0xfffc) == 0) {
eaa728ee 2513 goto fail;
20054ef0 2514 }
100ec099 2515 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2516 goto fail;
20054ef0
BS
2517 }
2518 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2519 goto fail;
20054ef0 2520 }
eaa728ee
FB
2521 rpl = selector & 3;
2522 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2523 cpl = env->hflags & HF_CPL_MASK;
2524 if (e2 & DESC_CS_MASK) {
20054ef0 2525 if (!(e2 & DESC_R_MASK)) {
eaa728ee 2526 goto fail;
20054ef0 2527 }
eaa728ee 2528 if (!(e2 & DESC_C_MASK)) {
20054ef0 2529 if (dpl < cpl || dpl < rpl) {
eaa728ee 2530 goto fail;
20054ef0 2531 }
eaa728ee
FB
2532 }
2533 } else {
2534 if (dpl < cpl || dpl < rpl) {
2535 fail:
2536 CC_SRC = eflags & ~CC_Z;
2537 return;
2538 }
2539 }
2540 CC_SRC = eflags | CC_Z;
2541}
2542
2999a0b2 2543void helper_verw(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2544{
2545 uint32_t e1, e2, eflags, selector;
2546 int rpl, dpl, cpl;
2547
2548 selector = selector1 & 0xffff;
f0967a1a 2549 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2550 if ((selector & 0xfffc) == 0) {
eaa728ee 2551 goto fail;
20054ef0 2552 }
100ec099 2553 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2554 goto fail;
20054ef0
BS
2555 }
2556 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2557 goto fail;
20054ef0 2558 }
eaa728ee
FB
2559 rpl = selector & 3;
2560 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2561 cpl = env->hflags & HF_CPL_MASK;
2562 if (e2 & DESC_CS_MASK) {
2563 goto fail;
2564 } else {
20054ef0 2565 if (dpl < cpl || dpl < rpl) {
eaa728ee 2566 goto fail;
20054ef0 2567 }
eaa728ee
FB
2568 if (!(e2 & DESC_W_MASK)) {
2569 fail:
2570 CC_SRC = eflags & ~CC_Z;
2571 return;
2572 }
2573 }
2574 CC_SRC = eflags | CC_Z;
2575}
2576
f299f437 2577#if defined(CONFIG_USER_ONLY)
2999a0b2 2578void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
eaa728ee 2579{
f299f437 2580 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
b98dbc90 2581 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
f299f437
BS
2582 selector &= 0xffff;
2583 cpu_x86_load_seg_cache(env, seg_reg, selector,
b98dbc90
PB
2584 (selector << 4), 0xffff,
2585 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2586 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
f299f437 2587 } else {
2999a0b2 2588 helper_load_seg(env, seg_reg, selector);
13822781 2589 }
eaa728ee 2590}
eaa728ee 2591#endif
81cf8d8a
PB
2592
2593/* check if Port I/O is allowed in TSS */
100ec099
PD
2594static inline void check_io(CPUX86State *env, int addr, int size,
2595 uintptr_t retaddr)
81cf8d8a
PB
2596{
2597 int io_offset, val, mask;
2598
2599 /* TSS must be a valid 32 bit one */
2600 if (!(env->tr.flags & DESC_P_MASK) ||
2601 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2602 env->tr.limit < 103) {
2603 goto fail;
2604 }
100ec099 2605 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
81cf8d8a
PB
2606 io_offset += (addr >> 3);
2607 /* Note: the check needs two bytes */
2608 if ((io_offset + 1) > env->tr.limit) {
2609 goto fail;
2610 }
100ec099 2611 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
81cf8d8a
PB
2612 val >>= (addr & 7);
2613 mask = (1 << size) - 1;
2614 /* all bits must be zero to allow the I/O */
2615 if ((val & mask) != 0) {
2616 fail:
100ec099 2617 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
81cf8d8a
PB
2618 }
2619}
2620
/* Guest-visible wrappers around check_io() for 1-, 2- and 4-byte port
   accesses; GETPC() captures the host return address so a #GP fault
   can unwind precisely to the guest I/O instruction. */
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}