/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif

/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

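/*
 * Fetch the stack pointer and stack segment for privilege level 'dpl'
 * from the current TSS, handling both the 16-bit and the 32-bit TSS
 * layouts.
 */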
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

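/*
 * Load one segment register as part of a task switch, performing the
 * usual descriptor checks; any failure raises #TS with the selector as
 * the error code.
 */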
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

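/*
 * Hardware task switch to the TSS named by 'tss_selector': save the
 * outgoing context in the current TSS, clear or set the busy bits as
 * dictated by 'source' (jmp/call/iret), then load the registers, the
 * LDT and the segment registers from the new TSS.
 */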
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}

static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

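/*
 * Exceptions 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC) push
 * an error code on the stack; all other vectors do not.
 */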
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

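/*
 * Update ESP/RSP while preserving the bits excluded by the stack-size
 * mask: 16-bit stacks keep the upper bits of ESP, 32-bit stacks
 * zero-extend on 64-bit targets.
 */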
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
    {                                                            \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                 \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)

/* protected mode interrupt */
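/*
 * The gate type selects the delivery path: a task gate (type 5) goes
 * through switch_tss(), while 286/386 interrupt and trap gates push the
 * return frame either on the current stack or, when the handler runs at
 * a more privileged level, on the stack fetched from the TSS.
 */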
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)

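/*
 * Fetch the 64-bit stack pointer for the given privilege level or IST
 * slot from the 64-bit TSS.
 */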
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}

/* 64 bit interrupt */
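/*
 * Long-mode delivery: only 64-bit interrupt and trap gates are valid.
 * The handler stack is the IST entry named by the gate, or the RSP for
 * the target privilege level taken from the TSS on a privilege change,
 * and it is aligned to 16 bytes before the frame is pushed.
 */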
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif

#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
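/*
 * SYSCALL: the new CS/SS selectors come from bits 47:32 of env->star.
 * In long mode RCX receives the return RIP, R11 the saved RFLAGS, the
 * flags selected by env->fmask are cleared and RIP is loaded from
 * env->lstar (64-bit code) or env->cstar (compatibility mode); in
 * legacy mode ECX receives the return EIP and EIP comes from the low
 * 32 bits of env->star.
 */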
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif

#ifdef TARGET_X86_64
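/*
 * SYSRET: return to CPL 3 using selectors derived from bits 63:48 of
 * env->star.  In long mode RFLAGS is restored from R11 and RIP from RCX
 * (64- or 32-bit depending on dflag); in legacy mode only IF is set and
 * EIP comes from ECX.
 */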
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}

#else

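/*
 * While SVM event injection is active (HF_SVMI_MASK), record the event
 * being delivered in VMCB.control.event_inj (plus the error code when
 * applicable), unless a valid event is already pending there.
 */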
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
#endif

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}

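/*
 * Check the pending interrupt_request bits and deliver at most one event
 * (SIPI, SMI, NMI, MCE, external or virtual interrupt) if the current
 * state allows it; returns true when an event was handled so the main
 * loop re-evaluates before executing more code.
 */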
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        /* Don't process multiple interrupt requests in a single call.
           This is required to make icount-driven execution deterministic. */
        return true;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;
            /* FIXME: this should respect TPR */
            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
            intno = x86_ldl_phys(cs, env->vm_vmcb
                                 + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}

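/*
 * ENTER helper for nesting levels > 0: copy 'level - 1' stack slots from
 * the old frame (at EBP downwards) onto the stack and then push the new
 * frame pointer t1, using 16- or 32-bit accesses according to the
 * operand size.
 */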
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data_ra(env, ssp + (esp & esp_mask),
                            cpu_ldl_data_ra(env, ssp + (ebp & esp_mask),
                                            GETPC()),
                            GETPC());
        }
        esp -= 4;
        cpu_stl_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, ssp + (esp & esp_mask),
                            cpu_lduw_data_ra(env, ssp + (ebp & esp_mask),
                                             GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, ssp + (esp & esp_mask), t1, GETPC());
    }
}

#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data_ra(env, esp, cpu_ldq_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 8;
        cpu_stq_data_ra(env, esp, t1, GETPC());
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data_ra(env, esp, cpu_lduw_data_ra(env, ebp, GETPC()),
                            GETPC());
        }
        esp -= 2;
        cpu_stw_data_ra(env, esp, t1, GETPC());
    }
}
#endif

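/*
 * LLDT: load the LDT register from a descriptor in the GDT (16 bytes in
 * long mode); a null selector leaves the LDT empty.
 */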
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

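/*
 * LTR: load the task register from an available TSS descriptor in the
 * GDT and mark the descriptor busy.
 */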
2999a0b2 1514void helper_ltr(CPUX86State *env, int selector)
eaa728ee
FB
1515{
1516 SegmentCache *dt;
1517 uint32_t e1, e2;
1518 int index, type, entry_limit;
1519 target_ulong ptr;
1520
1521 selector &= 0xffff;
1522 if ((selector & 0xfffc) == 0) {
1523 /* NULL selector case: invalid TR */
1524 env->tr.base = 0;
1525 env->tr.limit = 0;
1526 env->tr.flags = 0;
1527 } else {
20054ef0 1528 if (selector & 0x4) {
100ec099 1529 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1530 }
eaa728ee
FB
1531 dt = &env->gdt;
1532 index = selector & ~7;
1533#ifdef TARGET_X86_64
20054ef0 1534 if (env->hflags & HF_LMA_MASK) {
eaa728ee 1535 entry_limit = 15;
20054ef0 1536 } else
eaa728ee 1537#endif
20054ef0 1538 {
eaa728ee 1539 entry_limit = 7;
20054ef0
BS
1540 }
1541 if ((index + entry_limit) > dt->limit) {
100ec099 1542 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1543 }
eaa728ee 1544 ptr = dt->base + index;
100ec099
PD
1545 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1546 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
eaa728ee
FB
1547 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1548 if ((e2 & DESC_S_MASK) ||
20054ef0 1549 (type != 1 && type != 9)) {
100ec099 1550 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0
BS
1551 }
1552 if (!(e2 & DESC_P_MASK)) {
100ec099 1553 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
20054ef0 1554 }
eaa728ee
FB
1555#ifdef TARGET_X86_64
1556 if (env->hflags & HF_LMA_MASK) {
1557 uint32_t e3, e4;
20054ef0 1558
100ec099
PD
1559 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1560 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
20054ef0 1561 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
100ec099 1562 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1563 }
eaa728ee
FB
1564 load_seg_cache_raw_dt(&env->tr, e1, e2);
1565 env->tr.base |= (target_ulong)e3 << 32;
1566 } else
1567#endif
1568 {
1569 load_seg_cache_raw_dt(&env->tr, e1, e2);
1570 }
1571 e2 |= DESC_TSS_BUSY_MASK;
100ec099 1572 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
eaa728ee
FB
1573 }
1574 env->tr.selector = selector;
1575}
1576
1577/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2999a0b2 1578void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
eaa728ee
FB
1579{
1580 uint32_t e1, e2;
1581 int cpl, dpl, rpl;
1582 SegmentCache *dt;
1583 int index;
1584 target_ulong ptr;
1585
1586 selector &= 0xffff;
1587 cpl = env->hflags & HF_CPL_MASK;
1588 if ((selector & 0xfffc) == 0) {
1589 /* null selector case */
1590 if (seg_reg == R_SS
1591#ifdef TARGET_X86_64
1592 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1593#endif
20054ef0 1594 ) {
100ec099 1595 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1596 }
eaa728ee
FB
1597 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1598 } else {
1599
20054ef0 1600 if (selector & 0x4) {
eaa728ee 1601 dt = &env->ldt;
20054ef0 1602 } else {
eaa728ee 1603 dt = &env->gdt;
20054ef0 1604 }
eaa728ee 1605 index = selector & ~7;
20054ef0 1606 if ((index + 7) > dt->limit) {
100ec099 1607 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1608 }
eaa728ee 1609 ptr = dt->base + index;
100ec099
PD
1610 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1611 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
eaa728ee 1612
20054ef0 1613 if (!(e2 & DESC_S_MASK)) {
100ec099 1614 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1615 }
eaa728ee
FB
1616 rpl = selector & 3;
1617 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1618 if (seg_reg == R_SS) {
1619 /* must be writable segment */
20054ef0 1620 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
100ec099 1621 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0
BS
1622 }
1623 if (rpl != cpl || dpl != cpl) {
100ec099 1624 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1625 }
eaa728ee
FB
1626 } else {
1627 /* must be readable segment */
20054ef0 1628 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
100ec099 1629 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1630 }
eaa728ee
FB
1631
1632 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1633 /* if not conforming code, test rights */
20054ef0 1634 if (dpl < cpl || dpl < rpl) {
100ec099 1635 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1636 }
eaa728ee
FB
1637 }
1638 }
1639
1640 if (!(e2 & DESC_P_MASK)) {
20054ef0 1641 if (seg_reg == R_SS) {
100ec099 1642 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
20054ef0 1643 } else {
100ec099 1644 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
20054ef0 1645 }
eaa728ee
FB
1646 }
1647
1648 /* set the access bit if not already set */
1649 if (!(e2 & DESC_A_MASK)) {
1650 e2 |= DESC_A_MASK;
100ec099 1651 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
eaa728ee
FB
1652 }
1653
1654 cpu_x86_load_seg_cache(env, seg_reg, selector,
1655 get_seg_base(e1, e2),
1656 get_seg_limit(e1, e2),
1657 e2);
1658#if 0
93fcfe39 1659 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
eaa728ee
FB
1660 selector, (unsigned long)env->segs[seg_reg].base, (unsigned long)env->segs[seg_reg].limit, env->segs[seg_reg].flags);
1661#endif
1662 }
1663}
1664
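/* The far-jump helper below handles direct jumps to code segments
 * (conforming and non-conforming) as well as jumps through call gates;
 * TSS and task-gate targets are redirected to switch_tss_ra().
 */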
1665/* protected mode jump */
2999a0b2 1666void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
100ec099 1667 target_ulong next_eip)
eaa728ee
FB
1668{
1669 int gate_cs, type;
1670 uint32_t e1, e2, cpl, dpl, rpl, limit;
eaa728ee 1671
20054ef0 1672 if ((new_cs & 0xfffc) == 0) {
100ec099 1673 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1674 }
100ec099
PD
1675 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1676 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1677 }
eaa728ee
FB
1678 cpl = env->hflags & HF_CPL_MASK;
1679 if (e2 & DESC_S_MASK) {
20054ef0 1680 if (!(e2 & DESC_CS_MASK)) {
100ec099 1681 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1682 }
eaa728ee
FB
1683 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1684 if (e2 & DESC_C_MASK) {
1685 /* conforming code segment */
20054ef0 1686 if (dpl > cpl) {
100ec099 1687 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1688 }
eaa728ee
FB
1689 } else {
1690 /* non-conforming code segment */
1691 rpl = new_cs & 3;
20054ef0 1692 if (rpl > cpl) {
100ec099 1693 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0
BS
1694 }
1695 if (dpl != cpl) {
100ec099 1696 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1697 }
eaa728ee 1698 }
20054ef0 1699 if (!(e2 & DESC_P_MASK)) {
100ec099 1700 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
20054ef0 1701 }
eaa728ee
FB
1702 limit = get_seg_limit(e1, e2);
1703 if (new_eip > limit &&
20054ef0 1704 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
100ec099 1705 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1706 }
eaa728ee
FB
1707 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1708 get_seg_base(e1, e2), limit, e2);
a78d0eab 1709 env->eip = new_eip;
eaa728ee
FB
1710 } else {
1711 /* jump to call or task gate */
1712 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1713 rpl = new_cs & 3;
1714 cpl = env->hflags & HF_CPL_MASK;
1715 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 1716 switch (type) {
eaa728ee
FB
1717 case 1: /* 286 TSS */
1718 case 9: /* 386 TSS */
1719 case 5: /* task gate */
20054ef0 1720 if (dpl < cpl || dpl < rpl) {
100ec099 1721 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1722 }
100ec099 1723 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
eaa728ee
FB
1724 break;
1725 case 4: /* 286 call gate */
1726 case 12: /* 386 call gate */
20054ef0 1727 if ((dpl < cpl) || (dpl < rpl)) {
100ec099 1728 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0
BS
1729 }
1730 if (!(e2 & DESC_P_MASK)) {
100ec099 1731 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
20054ef0 1732 }
eaa728ee
FB
1733 gate_cs = e1 >> 16;
1734 new_eip = (e1 & 0xffff);
20054ef0 1735 if (type == 12) {
eaa728ee 1736 new_eip |= (e2 & 0xffff0000);
20054ef0 1737 }
100ec099
PD
1738 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1739 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
20054ef0 1740 }
eaa728ee
FB
1741 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1742 /* must be code segment */
1743 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
20054ef0 1744 (DESC_S_MASK | DESC_CS_MASK))) {
100ec099 1745 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
20054ef0 1746 }
eaa728ee 1747 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
20054ef0 1748 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
100ec099 1749 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
20054ef0
BS
1750 }
1751 if (!(e2 & DESC_P_MASK)) {
100ec099 1752 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
20054ef0 1753 }
eaa728ee 1754 limit = get_seg_limit(e1, e2);
20054ef0 1755 if (new_eip > limit) {
100ec099 1756 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1757 }
eaa728ee
FB
1758 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1759 get_seg_base(e1, e2), limit, e2);
a78d0eab 1760 env->eip = new_eip;
eaa728ee
FB
1761 break;
1762 default:
100ec099 1763 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
eaa728ee
FB
1764 break;
1765 }
1766 }
1767}
1768
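/* In real mode a far call only pushes the current CS:IP (16- or 32-bit,
 * depending on the operand size in 'shift') and reloads CS with
 * base = selector << 4; no descriptor checks apply.
 */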
1769/* real mode call */
2999a0b2 1770void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
eaa728ee
FB
1771 int shift, int next_eip)
1772{
1773 int new_eip;
1774 uint32_t esp, esp_mask;
1775 target_ulong ssp;
1776
1777 new_eip = new_eip1;
08b3ded6 1778 esp = env->regs[R_ESP];
eaa728ee
FB
1779 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1780 ssp = env->segs[R_SS].base;
1781 if (shift) {
100ec099
PD
1782 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1783 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
eaa728ee 1784 } else {
100ec099
PD
1785 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1786 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
eaa728ee
FB
1787 }
1788
1789 SET_ESP(esp, esp_mask);
1790 env->eip = new_eip;
1791 env->segs[R_CS].selector = new_cs;
1792 env->segs[R_CS].base = (new_cs << 4);
1793}
1794
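/* Protected-mode far calls: a direct call to a code segment pushes the
 * return CS:EIP on the current stack; a call through a call gate to a
 * more privileged segment switches to the inner stack taken from the
 * TSS, pushes the caller's SS:ESP there, copies 'param_count' stack
 * parameters across, and then pushes the return CS:EIP.
 */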
1795/* protected mode call */
2999a0b2 1796void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
100ec099 1797 int shift, target_ulong next_eip)
eaa728ee
FB
1798{
1799 int new_stack, i;
1800 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1c918eba 1801 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
eaa728ee 1802 uint32_t val, limit, old_sp_mask;
100ec099 1803 target_ulong ssp, old_ssp;
eaa728ee 1804
d12d51d5 1805 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
8995b7a0 1806 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
20054ef0 1807 if ((new_cs & 0xfffc) == 0) {
100ec099 1808 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1809 }
100ec099
PD
1810 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1811 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1812 }
eaa728ee 1813 cpl = env->hflags & HF_CPL_MASK;
d12d51d5 1814 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
eaa728ee 1815 if (e2 & DESC_S_MASK) {
20054ef0 1816 if (!(e2 & DESC_CS_MASK)) {
100ec099 1817 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1818 }
eaa728ee
FB
1819 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1820 if (e2 & DESC_C_MASK) {
1821 /* conforming code segment */
20054ef0 1822 if (dpl > cpl) {
100ec099 1823 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1824 }
eaa728ee
FB
1825 } else {
1826 /* non-conforming code segment */
1827 rpl = new_cs & 3;
20054ef0 1828 if (rpl > cpl) {
100ec099 1829 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0
BS
1830 }
1831 if (dpl != cpl) {
100ec099 1832 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1833 }
eaa728ee 1834 }
20054ef0 1835 if (!(e2 & DESC_P_MASK)) {
100ec099 1836 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
20054ef0 1837 }
eaa728ee
FB
1838
1839#ifdef TARGET_X86_64
1840 /* XXX: check 16/32 bit cases in long mode */
1841 if (shift == 2) {
1842 target_ulong rsp;
20054ef0 1843
eaa728ee 1844 /* 64 bit case */
08b3ded6 1845 rsp = env->regs[R_ESP];
100ec099
PD
1846 PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
1847 PUSHQ_RA(rsp, next_eip, GETPC());
eaa728ee 1848 /* from this point, not restartable */
08b3ded6 1849 env->regs[R_ESP] = rsp;
eaa728ee
FB
1850 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1851 get_seg_base(e1, e2),
1852 get_seg_limit(e1, e2), e2);
a78d0eab 1853 env->eip = new_eip;
eaa728ee
FB
1854 } else
1855#endif
1856 {
08b3ded6 1857 sp = env->regs[R_ESP];
eaa728ee
FB
1858 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1859 ssp = env->segs[R_SS].base;
1860 if (shift) {
100ec099
PD
1861 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1862 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
eaa728ee 1863 } else {
100ec099
PD
1864 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1865 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
eaa728ee
FB
1866 }
1867
1868 limit = get_seg_limit(e1, e2);
20054ef0 1869 if (new_eip > limit) {
100ec099 1870 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1871 }
eaa728ee
FB
1872 /* from this point, not restartable */
1873 SET_ESP(sp, sp_mask);
1874 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1875 get_seg_base(e1, e2), limit, e2);
a78d0eab 1876 env->eip = new_eip;
eaa728ee
FB
1877 }
1878 } else {
1879 /* check gate type */
1880 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1881 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1882 rpl = new_cs & 3;
20054ef0 1883 switch (type) {
eaa728ee
FB
1884 case 1: /* available 286 TSS */
1885 case 9: /* available 386 TSS */
1886 case 5: /* task gate */
20054ef0 1887 if (dpl < cpl || dpl < rpl) {
100ec099 1888 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1889 }
100ec099 1890 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
eaa728ee
FB
1891 return;
1892 case 4: /* 286 call gate */
1893 case 12: /* 386 call gate */
1894 break;
1895 default:
100ec099 1896 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
eaa728ee
FB
1897 break;
1898 }
1899 shift = type >> 3;
1900
20054ef0 1901 if (dpl < cpl || dpl < rpl) {
100ec099 1902 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1903 }
eaa728ee 1904 /* check valid bit */
20054ef0 1905 if (!(e2 & DESC_P_MASK)) {
100ec099 1906 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
20054ef0 1907 }
eaa728ee
FB
1908 selector = e1 >> 16;
1909 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1910 param_count = e2 & 0x1f;
20054ef0 1911 if ((selector & 0xfffc) == 0) {
100ec099 1912 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1913 }
eaa728ee 1914
100ec099
PD
1915 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
1916 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0
BS
1917 }
1918 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
100ec099 1919 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1920 }
eaa728ee 1921 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 1922 if (dpl > cpl) {
100ec099 1923 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0
BS
1924 }
1925 if (!(e2 & DESC_P_MASK)) {
100ec099 1926 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
20054ef0 1927 }
eaa728ee
FB
1928
1929 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1930 /* to inner privilege */
100ec099 1931 get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
90a2541b
LG
1932 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1933 TARGET_FMT_lx "\n", ss, sp, param_count,
1934 env->regs[R_ESP]);
20054ef0 1935 if ((ss & 0xfffc) == 0) {
100ec099 1936 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
20054ef0
BS
1937 }
1938 if ((ss & 3) != dpl) {
100ec099 1939 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
20054ef0 1940 }
100ec099
PD
1941 if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
1942 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
20054ef0 1943 }
eaa728ee 1944 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 1945 if (ss_dpl != dpl) {
100ec099 1946 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
20054ef0 1947 }
eaa728ee
FB
1948 if (!(ss_e2 & DESC_S_MASK) ||
1949 (ss_e2 & DESC_CS_MASK) ||
20054ef0 1950 !(ss_e2 & DESC_W_MASK)) {
100ec099 1951 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
20054ef0
BS
1952 }
1953 if (!(ss_e2 & DESC_P_MASK)) {
100ec099 1954 raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
20054ef0 1955 }
eaa728ee 1956
20054ef0 1957 /* push_size = ((param_count * 2) + 8) << shift; */
eaa728ee
FB
1958
1959 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1960 old_ssp = env->segs[R_SS].base;
1961
1962 sp_mask = get_sp_mask(ss_e2);
1963 ssp = get_seg_base(ss_e1, ss_e2);
1964 if (shift) {
100ec099
PD
1965 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1966 PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
20054ef0 1967 for (i = param_count - 1; i >= 0; i--) {
100ec099
PD
1968 val = cpu_ldl_kernel_ra(env, old_ssp +
1969 ((env->regs[R_ESP] + i * 4) &
1970 old_sp_mask), GETPC());
1971 PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
eaa728ee
FB
1972 }
1973 } else {
100ec099
PD
1974 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
1975 PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
20054ef0 1976 for (i = param_count - 1; i >= 0; i--) {
100ec099
PD
1977 val = cpu_lduw_kernel_ra(env, old_ssp +
1978 ((env->regs[R_ESP] + i * 2) &
1979 old_sp_mask), GETPC());
1980 PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
eaa728ee
FB
1981 }
1982 }
1983 new_stack = 1;
1984 } else {
1985 /* to same privilege */
08b3ded6 1986 sp = env->regs[R_ESP];
eaa728ee
FB
1987 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1988 ssp = env->segs[R_SS].base;
20054ef0 1989 /* push_size = (4 << shift); */
eaa728ee
FB
1990 new_stack = 0;
1991 }
1992
1993 if (shift) {
100ec099
PD
1994 PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1995 PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
eaa728ee 1996 } else {
100ec099
PD
1997 PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
1998 PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
eaa728ee
FB
1999 }
2000
2001 /* from this point, not restartable */
2002
2003 if (new_stack) {
2004 ss = (ss & ~3) | dpl;
2005 cpu_x86_load_seg_cache(env, R_SS, ss,
2006 ssp,
2007 get_seg_limit(ss_e1, ss_e2),
2008 ss_e2);
2009 }
2010
2011 selector = (selector & ~3) | dpl;
2012 cpu_x86_load_seg_cache(env, R_CS, selector,
2013 get_seg_base(e1, e2),
2014 get_seg_limit(e1, e2),
2015 e2);
eaa728ee 2016 SET_ESP(sp, sp_mask);
a78d0eab 2017 env->eip = offset;
eaa728ee 2018 }
eaa728ee
FB
2019}
2020
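/* IRET in real or VM86 mode pops EIP, CS and EFLAGS from the stack; the
 * set of EFLAGS bits allowed to change depends on whether the CPU is in
 * VM86 mode (IOPL is not writable there) and on the operand size.
 */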
2021/* real and vm86 mode iret */
2999a0b2 2022void helper_iret_real(CPUX86State *env, int shift)
eaa728ee
FB
2023{
2024 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2025 target_ulong ssp;
2026 int eflags_mask;
2027
20054ef0 2028 sp_mask = 0xffff; /* XXXX: use SS segment size? */
08b3ded6 2029 sp = env->regs[R_ESP];
eaa728ee
FB
2030 ssp = env->segs[R_SS].base;
2031 if (shift == 1) {
2032 /* 32 bits */
100ec099
PD
2033 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2034 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
eaa728ee 2035 new_cs &= 0xffff;
100ec099 2036 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
eaa728ee
FB
2037 } else {
2038 /* 16 bits */
100ec099
PD
2039 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2040 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2041 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
eaa728ee 2042 }
08b3ded6 2043 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2044 env->segs[R_CS].selector = new_cs;
2045 env->segs[R_CS].base = (new_cs << 4);
eaa728ee 2046 env->eip = new_eip;
20054ef0
BS
2047 if (env->eflags & VM_MASK) {
2048 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2049 NT_MASK;
2050 } else {
2051 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2052 RF_MASK | NT_MASK;
2053 }
2054 if (shift == 0) {
eaa728ee 2055 eflags_mask &= 0xffff;
20054ef0 2056 }
997ff0d9 2057 cpu_load_eflags(env, new_eflags, eflags_mask);
db620f46 2058 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2059}
2060
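/* When returning to an outer privilege level, segment registers holding
 * a data or non-conforming code segment whose DPL is below the new CPL
 * must not stay usable; validate_seg() nullifies such segments.
 */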
2999a0b2 2061static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
eaa728ee
FB
2062{
2063 int dpl;
2064 uint32_t e2;
2065
2066 /* XXX: on x86_64, we do not want to nullify FS and GS because
2067 they may still contain a valid base. I would be interested to
2068 know how a real x86_64 CPU behaves */
2069 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 2070 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2071 return;
20054ef0 2072 }
eaa728ee
FB
2073
2074 e2 = env->segs[seg_reg].flags;
2075 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2076 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2077 /* data or non-conforming code segment */
2078 if (dpl < cpl) {
2079 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2080 }
2081 }
2082}
2083
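/* helper_ret_protected() is shared by lret and iret: it pops the return
 * CS:EIP (and EFLAGS for iret), performs the privilege checks, reloads
 * SS:ESP when returning to an outer level, and branches to the
 * return-to-VM86 path for a 32-bit iret whose popped EFLAGS has VM set.
 */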
2084/* protected mode iret */
2999a0b2 2085static inline void helper_ret_protected(CPUX86State *env, int shift,
100ec099
PD
2086 int is_iret, int addend,
2087 uintptr_t retaddr)
eaa728ee
FB
2088{
2089 uint32_t new_cs, new_eflags, new_ss;
2090 uint32_t new_es, new_ds, new_fs, new_gs;
2091 uint32_t e1, e2, ss_e1, ss_e2;
2092 int cpl, dpl, rpl, eflags_mask, iopl;
2093 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2094
2095#ifdef TARGET_X86_64
20054ef0 2096 if (shift == 2) {
eaa728ee 2097 sp_mask = -1;
20054ef0 2098 } else
eaa728ee 2099#endif
20054ef0 2100 {
eaa728ee 2101 sp_mask = get_sp_mask(env->segs[R_SS].flags);
20054ef0 2102 }
08b3ded6 2103 sp = env->regs[R_ESP];
eaa728ee
FB
2104 ssp = env->segs[R_SS].base;
2105 new_eflags = 0; /* avoid warning */
2106#ifdef TARGET_X86_64
2107 if (shift == 2) {
100ec099
PD
2108 POPQ_RA(sp, new_eip, retaddr);
2109 POPQ_RA(sp, new_cs, retaddr);
eaa728ee
FB
2110 new_cs &= 0xffff;
2111 if (is_iret) {
100ec099 2112 POPQ_RA(sp, new_eflags, retaddr);
eaa728ee
FB
2113 }
2114 } else
2115#endif
20054ef0
BS
2116 {
2117 if (shift == 1) {
2118 /* 32 bits */
100ec099
PD
2119 POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
2120 POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
20054ef0
BS
2121 new_cs &= 0xffff;
2122 if (is_iret) {
100ec099 2123 POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
20054ef0
BS
2124 if (new_eflags & VM_MASK) {
2125 goto return_to_vm86;
2126 }
2127 }
2128 } else {
2129 /* 16 bits */
100ec099
PD
2130 POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
2131 POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
20054ef0 2132 if (is_iret) {
100ec099 2133 POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
20054ef0 2134 }
eaa728ee 2135 }
eaa728ee 2136 }
d12d51d5
AL
2137 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2138 new_cs, new_eip, shift, addend);
8995b7a0 2139 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
20054ef0 2140 if ((new_cs & 0xfffc) == 0) {
100ec099 2141 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
20054ef0 2142 }
100ec099
PD
2143 if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
2144 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
20054ef0 2145 }
eaa728ee 2146 if (!(e2 & DESC_S_MASK) ||
20054ef0 2147 !(e2 & DESC_CS_MASK)) {
100ec099 2148 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
20054ef0 2149 }
eaa728ee
FB
2150 cpl = env->hflags & HF_CPL_MASK;
2151 rpl = new_cs & 3;
20054ef0 2152 if (rpl < cpl) {
100ec099 2153 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
20054ef0 2154 }
eaa728ee
FB
2155 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2156 if (e2 & DESC_C_MASK) {
20054ef0 2157 if (dpl > rpl) {
100ec099 2158 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
20054ef0 2159 }
eaa728ee 2160 } else {
20054ef0 2161 if (dpl != rpl) {
100ec099 2162 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
20054ef0 2163 }
eaa728ee 2164 }
20054ef0 2165 if (!(e2 & DESC_P_MASK)) {
100ec099 2166 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
20054ef0 2167 }
eaa728ee
FB
2168
2169 sp += addend;
2170 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2171 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
1235fc06 2172 /* return to same privilege level */
eaa728ee
FB
2173 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2174 get_seg_base(e1, e2),
2175 get_seg_limit(e1, e2),
2176 e2);
2177 } else {
2178 /* return to different privilege level */
2179#ifdef TARGET_X86_64
2180 if (shift == 2) {
100ec099
PD
2181 POPQ_RA(sp, new_esp, retaddr);
2182 POPQ_RA(sp, new_ss, retaddr);
eaa728ee
FB
2183 new_ss &= 0xffff;
2184 } else
2185#endif
20054ef0
BS
2186 {
2187 if (shift == 1) {
2188 /* 32 bits */
100ec099
PD
2189 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2190 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
20054ef0
BS
2191 new_ss &= 0xffff;
2192 } else {
2193 /* 16 bits */
100ec099
PD
2194 POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
2195 POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
20054ef0 2196 }
eaa728ee 2197 }
d12d51d5 2198 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
20054ef0 2199 new_ss, new_esp);
eaa728ee
FB
2200 if ((new_ss & 0xfffc) == 0) {
2201#ifdef TARGET_X86_64
20054ef0
BS
2202 /* NULL ss is allowed in long mode if cpl != 3 */
2203 /* XXX: test CS64? */
eaa728ee
FB
2204 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2205 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2206 0, 0xffffffff,
2207 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2208 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2209 DESC_W_MASK | DESC_A_MASK);
20054ef0 2210 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
eaa728ee
FB
2211 } else
2212#endif
2213 {
100ec099 2214 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
eaa728ee
FB
2215 }
2216 } else {
20054ef0 2217 if ((new_ss & 3) != rpl) {
100ec099 2218 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
20054ef0 2219 }
100ec099
PD
2220 if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
2221 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
20054ef0 2222 }
eaa728ee
FB
2223 if (!(ss_e2 & DESC_S_MASK) ||
2224 (ss_e2 & DESC_CS_MASK) ||
20054ef0 2225 !(ss_e2 & DESC_W_MASK)) {
100ec099 2226 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
20054ef0 2227 }
eaa728ee 2228 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 2229 if (dpl != rpl) {
100ec099 2230 raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
20054ef0
BS
2231 }
2232 if (!(ss_e2 & DESC_P_MASK)) {
100ec099 2233 raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
20054ef0 2234 }
eaa728ee
FB
2235 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2236 get_seg_base(ss_e1, ss_e2),
2237 get_seg_limit(ss_e1, ss_e2),
2238 ss_e2);
2239 }
2240
2241 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2242 get_seg_base(e1, e2),
2243 get_seg_limit(e1, e2),
2244 e2);
eaa728ee
FB
2245 sp = new_esp;
2246#ifdef TARGET_X86_64
20054ef0 2247 if (env->hflags & HF_CS64_MASK) {
eaa728ee 2248 sp_mask = -1;
20054ef0 2249 } else
eaa728ee 2250#endif
20054ef0 2251 {
eaa728ee 2252 sp_mask = get_sp_mask(ss_e2);
20054ef0 2253 }
eaa728ee
FB
2254
2255 /* validate data segments */
2999a0b2
BS
2256 validate_seg(env, R_ES, rpl);
2257 validate_seg(env, R_DS, rpl);
2258 validate_seg(env, R_FS, rpl);
2259 validate_seg(env, R_GS, rpl);
eaa728ee
FB
2260
2261 sp += addend;
2262 }
2263 SET_ESP(sp, sp_mask);
2264 env->eip = new_eip;
2265 if (is_iret) {
2266 /* NOTE: 'cpl' is the _old_ CPL */
2267 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
20054ef0 2268 if (cpl == 0) {
eaa728ee 2269 eflags_mask |= IOPL_MASK;
20054ef0 2270 }
eaa728ee 2271 iopl = (env->eflags >> IOPL_SHIFT) & 3;
20054ef0 2272 if (cpl <= iopl) {
eaa728ee 2273 eflags_mask |= IF_MASK;
20054ef0
BS
2274 }
2275 if (shift == 0) {
eaa728ee 2276 eflags_mask &= 0xffff;
20054ef0 2277 }
997ff0d9 2278 cpu_load_eflags(env, new_eflags, eflags_mask);
eaa728ee
FB
2279 }
2280 return;
2281
2282 return_to_vm86:
100ec099
PD
2283 POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
2284 POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
2285 POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
2286 POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
2287 POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
2288 POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);
eaa728ee
FB
2289
2290 /* modify processor state */
997ff0d9
BS
2291 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2292 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2293 VIP_MASK);
2999a0b2 2294 load_seg_vm(env, R_CS, new_cs & 0xffff);
2999a0b2
BS
2295 load_seg_vm(env, R_SS, new_ss & 0xffff);
2296 load_seg_vm(env, R_ES, new_es & 0xffff);
2297 load_seg_vm(env, R_DS, new_ds & 0xffff);
2298 load_seg_vm(env, R_FS, new_fs & 0xffff);
2299 load_seg_vm(env, R_GS, new_gs & 0xffff);
eaa728ee
FB
2300
2301 env->eip = new_eip & 0xffff;
08b3ded6 2302 env->regs[R_ESP] = new_esp;
eaa728ee
FB
2303}
2304
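/* A protected-mode IRET with EFLAGS.NT set is a task return: the
 * selector of the previous task is read from the back-link field of the
 * current TSS and switch_tss_ra() restores that task's state; otherwise
 * the common helper_ret_protected() path is used.
 */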
2999a0b2 2305void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
eaa728ee
FB
2306{
2307 int tss_selector, type;
2308 uint32_t e1, e2;
2309
2310 /* specific case for TSS */
2311 if (env->eflags & NT_MASK) {
2312#ifdef TARGET_X86_64
20054ef0 2313 if (env->hflags & HF_LMA_MASK) {
100ec099 2314 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 2315 }
eaa728ee 2316#endif
100ec099 2317 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
20054ef0 2318 if (tss_selector & 4) {
100ec099 2319 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2320 }
100ec099
PD
2321 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2322 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2323 }
eaa728ee
FB
2324 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2325 /* NOTE: we check both segment and busy TSS */
20054ef0 2326 if (type != 3) {
100ec099 2327 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2328 }
100ec099 2329 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
eaa728ee 2330 } else {
100ec099 2331 helper_ret_protected(env, shift, 1, 0, GETPC());
eaa728ee 2332 }
db620f46 2333 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2334}
2335
2999a0b2 2336void helper_lret_protected(CPUX86State *env, int shift, int addend)
eaa728ee 2337{
100ec099 2338 helper_ret_protected(env, shift, 0, addend, GETPC());
eaa728ee
FB
2339}
2340
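/* SYSENTER loads flat CS and SS descriptors derived from the
 * IA32_SYSENTER_CS MSR value (CS = sysenter_cs, SS = sysenter_cs + 8)
 * and resumes at sysenter_eip with ESP taken from sysenter_esp; a zero
 * sysenter_cs raises #GP(0).
 */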
2999a0b2 2341void helper_sysenter(CPUX86State *env)
eaa728ee
FB
2342{
2343 if (env->sysenter_cs == 0) {
100ec099 2344 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
eaa728ee
FB
2345 }
2346 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2436b61a
AZ
2347
2348#ifdef TARGET_X86_64
2349 if (env->hflags & HF_LMA_MASK) {
2350 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2351 0, 0xffffffff,
2352 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2353 DESC_S_MASK |
20054ef0
BS
2354 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2355 DESC_L_MASK);
2436b61a
AZ
2356 } else
2357#endif
2358 {
2359 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2360 0, 0xffffffff,
2361 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2362 DESC_S_MASK |
2363 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2364 }
eaa728ee
FB
2365 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2366 0, 0xffffffff,
2367 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2368 DESC_S_MASK |
2369 DESC_W_MASK | DESC_A_MASK);
08b3ded6 2370 env->regs[R_ESP] = env->sysenter_esp;
a78d0eab 2371 env->eip = env->sysenter_eip;
eaa728ee
FB
2372}
2373
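/* SYSEXIT is the matching return to user mode: it builds ring-3 CS/SS
 * selectors from sysenter_cs (+16/+24 for 32-bit, +32/+40 for 64-bit)
 * and resumes at EDX with the stack pointer taken from ECX.  It faults
 * if sysenter_cs is zero or the caller is not at CPL 0.
 */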
2999a0b2 2374void helper_sysexit(CPUX86State *env, int dflag)
eaa728ee
FB
2375{
2376 int cpl;
2377
2378 cpl = env->hflags & HF_CPL_MASK;
2379 if (env->sysenter_cs == 0 || cpl != 0) {
100ec099 2380 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
eaa728ee 2381 }
2436b61a
AZ
2382#ifdef TARGET_X86_64
2383 if (dflag == 2) {
20054ef0
BS
2384 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2385 3, 0, 0xffffffff,
2436b61a
AZ
2386 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2387 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
20054ef0
BS
2388 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2389 DESC_L_MASK);
2390 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2391 3, 0, 0xffffffff,
2436b61a
AZ
2392 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2393 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2394 DESC_W_MASK | DESC_A_MASK);
2395 } else
2396#endif
2397 {
20054ef0
BS
2398 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2399 3, 0, 0xffffffff,
2436b61a
AZ
2400 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2401 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2402 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
20054ef0
BS
2403 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2404 3, 0, 0xffffffff,
2436b61a
AZ
2405 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2406 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2407 DESC_W_MASK | DESC_A_MASK);
2408 }
08b3ded6 2409 env->regs[R_ESP] = env->regs[R_ECX];
a78d0eab 2410 env->eip = env->regs[R_EDX];
eaa728ee
FB
2411}
2412
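/* LSL returns the expanded limit of the selected descriptor and reports
 * success through ZF; descriptor types it cannot inspect, null
 * selectors and insufficient privilege clear ZF and return 0.
 */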
2999a0b2 2413target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2414{
2415 unsigned int limit;
2416 uint32_t e1, e2, eflags, selector;
2417 int rpl, dpl, cpl, type;
2418
2419 selector = selector1 & 0xffff;
f0967a1a 2420 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2421 if ((selector & 0xfffc) == 0) {
dc1ded53 2422 goto fail;
20054ef0 2423 }
100ec099 2424 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2425 goto fail;
20054ef0 2426 }
eaa728ee
FB
2427 rpl = selector & 3;
2428 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2429 cpl = env->hflags & HF_CPL_MASK;
2430 if (e2 & DESC_S_MASK) {
2431 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2432 /* conforming */
2433 } else {
20054ef0 2434 if (dpl < cpl || dpl < rpl) {
eaa728ee 2435 goto fail;
20054ef0 2436 }
eaa728ee
FB
2437 }
2438 } else {
2439 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2440 switch (type) {
eaa728ee
FB
2441 case 1:
2442 case 2:
2443 case 3:
2444 case 9:
2445 case 11:
2446 break;
2447 default:
2448 goto fail;
2449 }
2450 if (dpl < cpl || dpl < rpl) {
2451 fail:
2452 CC_SRC = eflags & ~CC_Z;
2453 return 0;
2454 }
2455 }
2456 limit = get_seg_limit(e1, e2);
2457 CC_SRC = eflags | CC_Z;
2458 return limit;
2459}
2460
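/* LAR works the same way but returns the access-rights bytes of the
 * descriptor (masked to 0x00f0ff00) instead of the limit; it accepts a
 * few more system descriptor types than LSL, e.g. call and task gates.
 */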
2999a0b2 2461target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2462{
2463 uint32_t e1, e2, eflags, selector;
2464 int rpl, dpl, cpl, type;
2465
2466 selector = selector1 & 0xffff;
f0967a1a 2467 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2468 if ((selector & 0xfffc) == 0) {
eaa728ee 2469 goto fail;
20054ef0 2470 }
100ec099 2471 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2472 goto fail;
20054ef0 2473 }
eaa728ee
FB
2474 rpl = selector & 3;
2475 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2476 cpl = env->hflags & HF_CPL_MASK;
2477 if (e2 & DESC_S_MASK) {
2478 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2479 /* conforming */
2480 } else {
20054ef0 2481 if (dpl < cpl || dpl < rpl) {
eaa728ee 2482 goto fail;
20054ef0 2483 }
eaa728ee
FB
2484 }
2485 } else {
2486 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2487 switch (type) {
eaa728ee
FB
2488 case 1:
2489 case 2:
2490 case 3:
2491 case 4:
2492 case 5:
2493 case 9:
2494 case 11:
2495 case 12:
2496 break;
2497 default:
2498 goto fail;
2499 }
2500 if (dpl < cpl || dpl < rpl) {
2501 fail:
2502 CC_SRC = eflags & ~CC_Z;
2503 return 0;
2504 }
2505 }
2506 CC_SRC = eflags | CC_Z;
2507 return e2 & 0x00f0ff00;
2508}
2509
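/* VERR and VERW only update ZF: they report whether the selected
 * segment would be readable (VERR) or writable (VERW) at the current
 * privilege level, without actually loading it.
 */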
2999a0b2 2510void helper_verr(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2511{
2512 uint32_t e1, e2, eflags, selector;
2513 int rpl, dpl, cpl;
2514
2515 selector = selector1 & 0xffff;
f0967a1a 2516 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2517 if ((selector & 0xfffc) == 0) {
eaa728ee 2518 goto fail;
20054ef0 2519 }
100ec099 2520 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2521 goto fail;
20054ef0
BS
2522 }
2523 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2524 goto fail;
20054ef0 2525 }
eaa728ee
FB
2526 rpl = selector & 3;
2527 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2528 cpl = env->hflags & HF_CPL_MASK;
2529 if (e2 & DESC_CS_MASK) {
20054ef0 2530 if (!(e2 & DESC_R_MASK)) {
eaa728ee 2531 goto fail;
20054ef0 2532 }
eaa728ee 2533 if (!(e2 & DESC_C_MASK)) {
20054ef0 2534 if (dpl < cpl || dpl < rpl) {
eaa728ee 2535 goto fail;
20054ef0 2536 }
eaa728ee
FB
2537 }
2538 } else {
2539 if (dpl < cpl || dpl < rpl) {
2540 fail:
2541 CC_SRC = eflags & ~CC_Z;
2542 return;
2543 }
2544 }
2545 CC_SRC = eflags | CC_Z;
2546}
2547
2999a0b2 2548void helper_verw(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2549{
2550 uint32_t e1, e2, eflags, selector;
2551 int rpl, dpl, cpl;
2552
2553 selector = selector1 & 0xffff;
f0967a1a 2554 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2555 if ((selector & 0xfffc) == 0) {
eaa728ee 2556 goto fail;
20054ef0 2557 }
100ec099 2558 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2559 goto fail;
20054ef0
BS
2560 }
2561 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2562 goto fail;
20054ef0 2563 }
eaa728ee
FB
2564 rpl = selector & 3;
2565 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2566 cpl = env->hflags & HF_CPL_MASK;
2567 if (e2 & DESC_CS_MASK) {
2568 goto fail;
2569 } else {
20054ef0 2570 if (dpl < cpl || dpl < rpl) {
eaa728ee 2571 goto fail;
20054ef0 2572 }
eaa728ee
FB
2573 if (!(e2 & DESC_W_MASK)) {
2574 fail:
2575 CC_SRC = eflags & ~CC_Z;
2576 return;
2577 }
2578 }
2579 CC_SRC = eflags | CC_Z;
2580}
2581
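/* For user-mode emulation, cpu_x86_load_seg() falls back to a flat
 * real-mode style segment (base = selector << 4) when protection is off
 * or VM86 is active, and otherwise reuses helper_load_seg().
 */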
f299f437 2582#if defined(CONFIG_USER_ONLY)
2999a0b2 2583void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
eaa728ee 2584{
f299f437 2585 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
b98dbc90 2586 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
f299f437
BS
2587 selector &= 0xffff;
2588 cpu_x86_load_seg_cache(env, seg_reg, selector,
b98dbc90
PB
2589 (selector << 4), 0xffff,
2590 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2591 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
f299f437 2592 } else {
2999a0b2 2593 helper_load_seg(env, seg_reg, selector);
13822781 2594 }
eaa728ee 2595}
eaa728ee 2596#endif
81cf8d8a
PB
2597
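/* check_io() consults the I/O permission bitmap of the current 32-bit
 * TSS: the 16-bit value at TSS offset 0x66 locates the bitmap, and
 * every bit covering the accessed port range must be clear, otherwise
 * #GP(0) is raised.  A full word is loaded so that accesses straddling
 * a byte boundary are covered by one check.
 */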
2598/* check if Port I/O is allowed in TSS */
100ec099
PD
2599static inline void check_io(CPUX86State *env, int addr, int size,
2600 uintptr_t retaddr)
81cf8d8a
PB
2601{
2602 int io_offset, val, mask;
2603
2604 /* TSS must be a valid 32 bit one */
2605 if (!(env->tr.flags & DESC_P_MASK) ||
2606 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2607 env->tr.limit < 103) {
2608 goto fail;
2609 }
100ec099 2610 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
81cf8d8a
PB
2611 io_offset += (addr >> 3);
2612 /* Note: the check needs two bytes */
2613 if ((io_offset + 1) > env->tr.limit) {
2614 goto fail;
2615 }
100ec099 2616 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
81cf8d8a
PB
2617 val >>= (addr & 7);
2618 mask = (1 << size) - 1;
2619 /* all bits must be zero to allow the I/O */
2620 if ((val & mask) != 0) {
2621 fail:
100ec099 2622 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
81cf8d8a
PB
2623 }
2624}
2625
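/* The three helpers below are the 1-, 2- and 4-byte entry points used
 * when a port access needs the TSS permission check above.
 */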
2626void helper_check_iob(CPUX86State *env, uint32_t t0)
2627{
100ec099 2628 check_io(env, t0, 1, GETPC());
81cf8d8a
PB
2629}
2630
2631void helper_check_iow(CPUX86State *env, uint32_t t0)
2632{
100ec099 2633 check_io(env, t0, 2, GETPC());
81cf8d8a
PB
2634}
2635
2636void helper_check_iol(CPUX86State *env, uint32_t t0)
2637{
100ec099 2638 check_io(env, t0, 4, GETPC());
81cf8d8a 2639}