]> git.proxmox.com Git - mirror_qemu.git/blame - target/i386/seg_helper.c
target/i386: Use env_cpu, env_archcpu
[mirror_qemu.git] / target / i386 / seg_helper.c
CommitLineData
eaa728ee 1/*
10774999
BS
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
eaa728ee
FB
4 *
5 * Copyright (c) 2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
eaa728ee 19 */
83dae095 20
b6a0aa05 21#include "qemu/osdep.h"
3e457172 22#include "cpu.h"
1de7afc9 23#include "qemu/log.h"
2ef6175a 24#include "exec/helper-proto.h"
63c91552 25#include "exec/exec-all.h"
f08b6170 26#include "exec/cpu_ldst.h"
508127e2 27#include "exec/log.h"
eaa728ee 28
//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
/* Trace protected-mode call/interrupt/task-switch paths via the
 * CPU_LOG_PCALL log mask. */
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
/* Dump the full CPU state (including condition-code data) to the same
 * log mask. */
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
/* Logging compiled out: both macros expand to no-ops. */
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
39
9220fe54
PM
#ifdef CONFIG_USER_ONLY
/* Instantiate the cpu_ld*_kernel / cpu_st*_kernel accessors (and their
 * _ra variants) for 1/2/4/8-byte accesses.  In user-only emulation
 * there is no kernel MMU index, so they come from the user-memory
 * template. */
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
/* System emulation: the same accessors, but routed through the kernel
 * MMU index so descriptor-table reads honour paging/privilege. */
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
71
eaa728ee 72/* return non zero if error */
100ec099
PD
73static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
74 uint32_t *e2_ptr, int selector,
75 uintptr_t retaddr)
eaa728ee
FB
76{
77 SegmentCache *dt;
78 int index;
79 target_ulong ptr;
80
20054ef0 81 if (selector & 0x4) {
eaa728ee 82 dt = &env->ldt;
20054ef0 83 } else {
eaa728ee 84 dt = &env->gdt;
20054ef0 85 }
eaa728ee 86 index = selector & ~7;
20054ef0 87 if ((index + 7) > dt->limit) {
eaa728ee 88 return -1;
20054ef0 89 }
eaa728ee 90 ptr = dt->base + index;
100ec099
PD
91 *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
92 *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
eaa728ee
FB
93 return 0;
94}
95
/* Convenience wrapper around load_segment_ra() for call sites that have
 * no TCG return address (retaddr == 0: fault is not attributed to a
 * translated instruction). */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
101
eaa728ee
FB
102static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
103{
104 unsigned int limit;
20054ef0 105
eaa728ee 106 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
20054ef0 107 if (e2 & DESC_G_MASK) {
eaa728ee 108 limit = (limit << 12) | 0xfff;
20054ef0 109 }
eaa728ee
FB
110 return limit;
111}
112
/* Reassemble the 32-bit segment base address that the descriptor format
 * scatters across both words: base[15:0] in e1[31:16], base[23:16] in
 * e2[7:0], base[31:24] in e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;

    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
117
20054ef0
BS
118static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
119 uint32_t e2)
eaa728ee
FB
120{
121 sc->base = get_seg_base(e1, e2);
122 sc->limit = get_seg_limit(e1, e2);
123 sc->flags = e2;
124}
125
126/* init the segment cache in vm86 mode. */
2999a0b2 127static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
eaa728ee
FB
128{
129 selector &= 0xffff;
b98dbc90
PB
130
131 cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
132 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
133 DESC_A_MASK | (3 << DESC_DPL_SHIFT));
eaa728ee
FB
134}
135
/* Read the inner-level stack pointer (SS:ESP pair) for privilege level
 * @dpl out of the current task-state segment.  Supports both the 16-bit
 * and 32-bit TSS layouts; raises #TS if the entry lies outside the TSS
 * limit, and aborts on an inconsistent task register (emulator bug). */
static void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                uint32_t *esp_ptr, int dpl,
                                uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    /* shift: 0 for a 16-bit TSS (2-byte slots), 1 for 32-bit (4-byte) */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        /* 16-bit TSS: SP then SS as consecutive words */
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        /* 32-bit TSS: dword ESP followed by word SS */
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
177
/* Load one segment register as part of a task switch, applying the
 * per-register protection checks.  A null selector is accepted for data
 * segments but raises #TS for CS and SS; any failed check raises #TS
 * (or #NP for a present-bit failure) with the selector as error code. */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        /* must be a code or data descriptor, not a system descriptor */
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            /* CS must be an executable segment with DPL == RPL */
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* null selector: legal for plain data segments, #TS for CS/SS */
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
233
/* Reasons for invoking switch_tss(); the source controls busy-bit
 * handling, the NT flag and whether a back link is stored in the new
 * TSS. */
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a full x86 hardware task switch to the TSS named by
 * @tss_selector (descriptor words @e1/@e2).  @source is one of
 * SWITCH_TSS_{JMP,IRET,CALL}; @next_eip is the EIP saved into the
 * outgoing TSS.  Follows a task gate if one is given, validates the
 * target TSS, saves the old state, then loads CR3, registers, LDT and
 * segment registers from the new TSS.  Raises #TS/#GP/#NP on check
 * failures. */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        /* the gate's target selector must reference the GDT */
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an (available) TSS descriptor type */
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    /* minimum TSS size: 104 bytes for 32-bit, 44 for 16-bit */
    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        /* IRET clears NT in the saved flags */
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* store the back link and set NT so IRET can return here */
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        /* LDT selector must reference the GDT */
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        /* must be a system descriptor of LDT type (2) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}
511
/* Convenience wrapper around switch_tss_ra() for call sites without a
 * TCG return address. */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
518
eaa728ee
FB
519static inline unsigned int get_sp_mask(unsigned int e2)
520{
0aca0605
AO
521#ifdef TARGET_X86_64
522 if (e2 & DESC_L_MASK) {
523 return 0;
524 } else
525#endif
20054ef0 526 if (e2 & DESC_B_MASK) {
eaa728ee 527 return 0xffffffff;
20054ef0 528 } else {
eaa728ee 529 return 0xffff;
20054ef0 530 }
eaa728ee
FB
531}
532
/* Return 1 if hardware exception @intno pushes an error code on the
 * stack (#DF=8, #TS=10, #NP=11, #SS=12, #GP=13, #PF=14, #AC=17),
 * otherwise 0. */
static int exception_has_error_code(int intno)
{
    return intno == 8 || (intno >= 10 && intno <= 14) || intno == 17;
}
547
#ifdef TARGET_X86_64
/* Write back a stack pointer honouring the stack-size mask: a 16-bit
 * stack replaces only the low word, a 32-bit stack zero-extends, and a
 * zero mask (64-bit stack) stores the full value. */
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
/* 32-bit-only build: simple masked merge is sufficient. */
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif
567
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
/* Push/pop 16- and 32-bit values at ssp + (sp & sp_mask); sp is a local
 * copy updated in place and written back afterwards via SET_ESP. */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                                \
        sp -= 2;                                                     \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                                      \
    {                                                                            \
        sp -= 4;                                                                 \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                           \
    {                                                                \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                     \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                                    \
    {                                                                         \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                              \
    }

/* Variants without a TCG return address (non-restartable contexts). */
#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
/* protected mode interrupt */
/* Deliver interrupt or exception @intno through the IDT in protected
 * mode.  @is_int distinguishes software ints (return address @next_eip,
 * gate DPL checked against CPL); @is_hw marks hardware interrupts
 * (never push an error code).  Handles task gates (via switch_tss),
 * 286/386 interrupt and trap gates, the stack switch to an inner
 * privilege level, and entry from vm86 mode. */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    /* IDT entries are 8 bytes; error code is (vector << 3) | 2 (IDT) */
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    /* target must be a present code segment with DPL <= CPL */
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        /* conforming code runs at the caller's privilege level */
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        /* new SS must be present writable data at the target DPL */
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        /* 386 gate: 32-bit pushes */
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        /* 286 gate: 16-bit pushes */
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            /* leaving vm86: null the data segment registers */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    env->eip = offset;
}
813
#ifdef TARGET_X86_64

/* 64-bit stack push/pop: RSP is never masked in long mode. */
#define PUSHQ_RA(sp, val, ra)                  \
    {                                          \
        sp -= 8;                               \
        cpu_stq_kernel_ra(env, sp, (val), ra); \
    }

#define POPQ_RA(sp, val, ra)                   \
    {                                          \
        val = cpu_ldq_kernel_ra(env, sp, ra);  \
        sp += 8;                               \
    }

/* Variants without a TCG return address. */
#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
830
/* Fetch the 64-bit stack pointer stored at slot @level of the current
 * 64-bit TSS (entries are 8 bytes starting at offset 4).  Raises #TS if
 * the entry lies outside the TSS limit; aborts on a non-present task
 * register (emulator bug). */
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
850
851/* 64 bit interrupt */
2999a0b2
BS
/*
 * Deliver an interrupt/exception in long mode (IDT entries are 16 bytes,
 * only 386-style interrupt/trap gates are legal).  is_int is true for a
 * software INT instruction, is_hw for an external interrupt; next_eip is
 * the return address pushed for a software int.  All descriptor checks
 * raise #GP/#NP/#TS via raise_exception_err (non-local exit), so the
 * ordering of the checks below is architectural and load-bearing.
 */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    /* Only genuine exceptions (not INTn, not hw irq) push an error code. */
    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* Long-mode IDT entry: three dwords of a 16-byte gate descriptor. */
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    /* 64-bit gate target offset spans all three dwords. */
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    /* Load and validate the target code-segment descriptor. */
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    /* Target must be a 64-bit code segment (L set, D/B clear). */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    /* A conforming segment executes at the caller's privilege level. */
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        /* IST slots live after the three RSPn slots, hence ist + 3. */
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    /* Build the 64-bit interrupt stack frame (SS:RSP always pushed). */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask (trap gates, type 15, leave IF set) */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        /* Long mode loads SS with a NULL selector at the new DPL. */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
969#endif
970
d9957a8b 971#ifdef TARGET_X86_64
eaa728ee 972#if defined(CONFIG_USER_ONLY)
/*
 * User-mode-only SYSCALL: there is no kernel to enter, so record the
 * return address and longjmp out of the translated code with
 * EXCP_SYSCALL so the user-mode emulation loop can handle it.
 * Does not return.
 */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_SYSCALL;
    /* EIP of the instruction following SYSCALL, for the signal frame. */
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
981#else
/*
 * System-emulation SYSCALL.  Raises #UD if EFER.SCE is clear.  In long
 * mode: RCX <- return RIP, R11 <- RFLAGS, flags masked by MSR_FMASK,
 * target RIP taken from LSTAR (64-bit caller) or CSTAR (compat mode).
 * In legacy mode: ECX <- return EIP, IF cleared, target from STAR[31:0].
 * CS/SS are loaded from STAR[47:32] with fixed flat descriptors, as the
 * hardware does, without consulting the GDT.
 */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    /* STAR[47:32] is the kernel CS selector base; SS is CS + 8. */
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        code64 = env->hflags & HF_CS64_MASK;

        /* Clear the flags selected by MSR_FMASK (plus RF). */
        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        /* Legacy-mode SYSCALL (AMD only on real hardware). */
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1033#endif
d9957a8b 1034#endif
eaa728ee 1035
d9957a8b 1036#ifdef TARGET_X86_64
/*
 * SYSRET: return from a SYSCALL.  dflag == 2 selects a 64-bit return
 * (RIP from RCX, 64-bit CS), otherwise a 32-bit/compat return.  Raises
 * #UD if EFER.SCE is clear, #GP(0) unless in protected mode at CPL 0.
 * User CS/SS are derived from STAR[63:48] (+16/+0 for CS, +8 for SS)
 * with fixed DPL-3 descriptors; in long mode RFLAGS is restored from
 * R11, in legacy mode only IF is set.
 */
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    /* STAR[63:48] is the user CS selector base. */
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        /* Restore the caller's flags from R11 (masked set). */
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            /* Return to 64-bit code: CS = STAR[63:48] + 16, RPL 3. */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            /* Return to compatibility-mode (32-bit) code. */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        /* Legacy-mode SYSRET: only IF is restored. */
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
d9957a8b 1089#endif
eaa728ee
FB
1090
1091/* real mode interrupt */
2999a0b2
BS
/*
 * Deliver an interrupt in real mode: read the 4-byte IVT entry for
 * 'intno', push FLAGS/CS/IP (16-bit) on the stack and jump to the
 * vector.  error_code is unused here (real mode pushes no error code);
 * next_eip is the return address used for software ints (is_int).
 */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* IVT entry: 16-bit offset then 16-bit segment. */
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1129
e694d4e2 1130#if defined(CONFIG_USER_ONLY)
33271823
PM
1131/* fake user mode interrupt. is_int is TRUE if coming from the int
1132 * instruction. next_eip is the env->eip value AFTER the interrupt
1133 * instruction. It is only relevant if is_int is TRUE or if intno
1134 * is EXCP_SYSCALL.
1135 */
2999a0b2
BS
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    /* For a software INT, still perform the architectural DPL >= CPL
     * privilege check against the IDT entry so guests see the right
     * #GP behaviour; error_code itself is otherwise unused here. */
    if (is_int) {
        SegmentCache *dt;
        target_ulong ptr;
        int dpl, cpl, shift;
        uint32_t e2;

        dt = &env->idt;
        /* IDT entries are 16 bytes in long mode, 8 bytes otherwise. */
        if (env->hflags & HF_LMA_MASK) {
            shift = 4;
        } else {
            shift = 3;
        }
        ptr = dt->base + (intno << shift);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        /* check privilege if software int */
        if (dpl < cpl) {
            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
        }
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}
1169
e694d4e2
BS
1170#else
1171
2999a0b2
BS
/*
 * SVM: record the event being delivered to a guest in the VMCB
 * EVENTINJ field, so a nested #VMEXIT during delivery reports it.
 * Only writes if no event is already marked valid.  'rm' is set when
 * delivering in real mode, where no error code is ever recorded.
 */
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = env_cpu(env);
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
00ea18d1 1199#endif
2ed51f5b 1200
eaa728ee
FB
1201/*
1202 * Begin execution of an interruption. is_int is TRUE if coming from
a78d0eab 1203 * the int instruction. next_eip is the env->eip value AFTER the interrupt
eaa728ee
FB
1204 * instruction. It is only relevant if is_int is TRUE.
1205 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    /* Optional interrupt tracing (enabled with -d int). */
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                /* Page fault: log the faulting address. */
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log(" code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    /* Dispatch on CPU mode: long mode / protected mode / real mode.
     * When running a guest under SVM, mirror the event into the VMCB
     * first so a fault during delivery is reported correctly. */
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

    /* Delivery completed without a nested fault: clear the VMCB
     * event-injection valid bit again. */
#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
1282
/*
 * CPUClass::do_interrupt hook: deliver the exception recorded in
 * cs->exception_index / env->error_code / env->exception_next_eip.
 * In system emulation, exception indexes >= EXCP_VMEXIT are routed to
 * the SVM #VMEXIT path instead of normal delivery.
 */
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    if (cs->exception_index >= EXCP_VMEXIT) {
        assert(env->old_exception == -1);
        do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
#endif
}
1312
/* Deliver a hardware interrupt 'intno' (is_int = 0, no error code).
 * Thin wrapper over do_interrupt_all for callers holding only env. */
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}
1317
42f53fea
RH
/*
 * CPUClass::cpu_exec_interrupt hook: service at most one pending
 * interrupt request (SIPI/SMI/NMI/MCE/hard irq/virtual irq/APIC poll)
 * per call.  Returns true if any request was handled, so the execution
 * loop restarts translation-block lookup.
 */
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    /* Mask down to the single highest-priority deliverable request. */
    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
#endif
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        /* NMIs are blocked until the next IRET. */
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_VIRQ:
        /* FIXME: this should respect TPR */
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        /* Vector comes from the guest's VMCB, not the PIC. */
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        break;
#endif
    }

    /* Ensure that no TB jump will be modified as the program flow was changed. */
    return true;
}
1383
/*
 * LLDT: load the Local Descriptor Table register from a GDT selector.
 * A null selector just invalidates the LDT.  Raises #GP for an LDT-bit
 * (TI) selector, out-of-range index, or wrong descriptor type (must be
 * system type 2), and #NP for a not-present descriptor.  In long mode
 * the descriptor is 16 bytes and carries a 64-bit base.
 */
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* The LDT descriptor must come from the GDT (TI bit clear). */
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;  /* 16-byte system descriptor in long mode */
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        /* Must be a system descriptor of type 2 (LDT). */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            /* Third dword holds base[63:32] in long mode. */
            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
1437
/*
 * LTR: load the Task Register from a GDT selector.  A null selector
 * invalidates TR.  The descriptor must be an available TSS (type 1 or
 * 9); #GP on TI-bit selectors, bad index or type, #NP if not present.
 * On success the descriptor's busy bit is set back in memory.  In long
 * mode the descriptor is 16 bytes with a 64-bit base, and dword 4's
 * type field must be zero.
 */
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* TSS descriptors live in the GDT only. */
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;  /* 16-byte system descriptor in long mode */
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* Must be an available 286 (1) or 386 (9) TSS. */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            /* Upper dword's type bits must be zero in long mode. */
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* Mark the TSS busy in the in-memory descriptor. */
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
1500
1501/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/*
 * Load a data/stack segment register (MOV/POP to DS/ES/FS/GS/SS).
 * Performs the architectural checks: null selector handling (forbidden
 * for SS outside 64-bit CPL<3), descriptor in range, S-type, writable
 * for SS / readable otherwise, DPL/RPL/CPL rules, present.  Sets the
 * descriptor's accessed bit in memory if needed, then loads the cache.
 */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            /* A null SS is legal in 64-bit mode below CPL 3. */
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* TI bit selects LDT vs GDT. */
        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        /* Must be a code/data (non-system) descriptor. */
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* A not-present SS raises #SS, other segments raise #NP. */
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
1588
1589/* protected mode jump */
/*
 * Protected-mode far JMP (JMP ptr16:16/32 or m16:16/32/64).  Handles a
 * direct jump to a code segment, a jump through a call gate (including
 * 64-bit call gates in long mode), and task switches via TSS or task
 * gate.  next_eip is the address of the following instruction, needed
 * by the task-switch path.  All check failures raise #GP/#NP with the
 * offending selector; the check order is architectural.
 */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* Direct jump to a code segment. */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        /* No limit check for 64-bit code segments. */
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        /* The target runs at the current CPL (RPL forced to CPL). */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        /* Long mode only allows 386 call gates (type 12). */
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            /* Extract target CS and entry offset from the gate. */
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            /* In long mode the gate target must be 64-bit code. */
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
1728
1729/* real mode call */
/*
 * Real-mode far CALL: push the return CS:IP (32- or 16-bit depending on
 * operand size 'shift') onto the stack, then load CS:IP from the
 * operand.  No descriptor checks apply in real mode; CS base is simply
 * selector << 4.
 */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        /* 32-bit operand size: push 32-bit CS and EIP. */
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
1754
/* protected mode call */
/*
 * Emulate a far CALL in protected mode: either a direct call through a
 * code-segment descriptor, or an indirect call through a TSS/task gate or
 * a 286/386 call gate (possibly with a privilege transition to an inner
 * stack).  Raises #GP/#NP/#TS on the many descriptor validity checks; the
 * order of the checks below follows the architectural sequence, so do not
 * reorder them.  'shift' selects the operand size (0=16, 1=32, 2=64 bit).
 */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    /* a NULL selector is never a valid call target */
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment (no gate) */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            /* 16/32-bit same-privilege call: push return CS:IP */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* call through a system descriptor: check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* long mode only accepts 64-bit (386-style) call gates */
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        /* gate type encodes the operand size: 4 -> 16-bit, 12 -> 32-bit */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        /* decode the call gate: target selector, parameter count, offset */
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        /* load and validate the target code segment named by the gate */
        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* target of a long-mode call gate must be 64-bit code (L=1, D=0) */
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0;  /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                /* fetch the inner stack (SS:ESP) for 'dpl' from the TSS
                   and fully validate the new SS descriptor */
                uint32_t sp32;
                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            /* push old SS:ESP on the new stack, then copy 'param_count'
               parameters from the old stack */
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        /* push the return CS:IP on the (possibly new) stack */
#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        /* load CS with RPL forced to the new privilege level */
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
2051
2052/* real and vm86 mode iret */
2999a0b2 2053void helper_iret_real(CPUX86State *env, int shift)
eaa728ee
FB
2054{
2055 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2056 target_ulong ssp;
2057 int eflags_mask;
2058
20054ef0 2059 sp_mask = 0xffff; /* XXXX: use SS segment size? */
08b3ded6 2060 sp = env->regs[R_ESP];
eaa728ee
FB
2061 ssp = env->segs[R_SS].base;
2062 if (shift == 1) {
2063 /* 32 bits */
100ec099
PD
2064 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
2065 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
eaa728ee 2066 new_cs &= 0xffff;
100ec099 2067 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
eaa728ee
FB
2068 } else {
2069 /* 16 bits */
100ec099
PD
2070 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
2071 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
2072 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
eaa728ee 2073 }
08b3ded6 2074 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2075 env->segs[R_CS].selector = new_cs;
2076 env->segs[R_CS].base = (new_cs << 4);
eaa728ee 2077 env->eip = new_eip;
20054ef0
BS
2078 if (env->eflags & VM_MASK) {
2079 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2080 NT_MASK;
2081 } else {
2082 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2083 RF_MASK | NT_MASK;
2084 }
2085 if (shift == 0) {
eaa728ee 2086 eflags_mask &= 0xffff;
20054ef0 2087 }
997ff0d9 2088 cpu_load_eflags(env, new_eflags, eflags_mask);
db620f46 2089 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2090}
2091
2999a0b2 2092static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
eaa728ee
FB
2093{
2094 int dpl;
2095 uint32_t e2;
2096
2097 /* XXX: on x86_64, we do not want to nullify FS and GS because
2098 they may still contain a valid base. I would be interested to
2099 know how a real x86_64 CPU behaves */
2100 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 2101 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2102 return;
20054ef0 2103 }
eaa728ee
FB
2104
2105 e2 = env->segs[seg_reg].flags;
2106 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2107 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2108 /* data or non conforming code segment */
2109 if (dpl < cpl) {
2110 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2111 }
2112 }
2113}
2114
/* protected mode iret */
/*
 * Common implementation of protected-mode far RET (is_iret == 0) and
 * IRET (is_iret == 1).  Pops the return CS:IP (and EFLAGS for iret),
 * validates the target code segment, and when returning to an outer
 * privilege level also pops and validates the new SS:ESP.  A 32-bit
 * iret whose popped EFLAGS has VM set returns to vm86 mode instead.
 * 'shift' selects operand size (0=16, 1=32, 2=64); 'addend' is extra
 * bytes to discard from the stack (the immediate of RET n); 'retaddr'
 * is the host return address for restartable exceptions.
 */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    /* pop the return frame with the requested operand size */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    /* validate the return code segment descriptor */
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* a return may never raise privilege */
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
        /* pop the outer SS:ESP from the current stack */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            /* validate the new stack segment descriptor */
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* the 32-bit iret frame continues with ESP, SS and the four data
       segment selectors for the vm86 task */
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
2335
/*
 * Protected mode IRET.  If EFLAGS.NT is set this is a return from a
 * nested task: the back-link TSS selector is read from the current TSS
 * and a task switch is performed.  Otherwise it is an ordinary iret
 * handled by helper_ret_protected().  In both cases the NMI blocking
 * state is cleared afterwards.
 */
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* task returns are not available in long mode */
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        /* back-link selector is the first word of the current TSS */
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            /* must be in the GDT, not the LDT */
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
2366
/* Protected mode far RET (lret / lret n): pop CS:IP and discard
   'addend' extra bytes; no EFLAGS restore (is_iret == 0). */
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}
2371
/*
 * SYSENTER: fast transition to ring 0.  CS/SS are loaded with flat
 * descriptors derived from IA32_SYSENTER_CS (SS selector = CS + 8),
 * EIP/ESP come from the IA32_SYSENTER_EIP/ESP MSRs.  #GP(0) if the
 * SYSENTER MSRs were never set up (sysenter_cs == 0).
 */
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: target CS is a 64-bit code segment (L bit set) */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    /* SS is the flat writable data segment right after CS in the GDT */
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
2404
/*
 * SYSEXIT: fast return to ring 3.  CS/SS selectors are derived from
 * IA32_SYSENTER_CS at fixed offsets (+16/+24 for 32-bit, +32/+40 for
 * 64-bit when dflag == 2) with RPL forced to 3; EIP comes from EDX and
 * ESP from ECX.  #GP(0) if the MSR is unset or the caller is not ring 0.
 */
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit return: CS has the L bit set */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
2443
2999a0b2 2444target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2445{
2446 unsigned int limit;
2447 uint32_t e1, e2, eflags, selector;
2448 int rpl, dpl, cpl, type;
2449
2450 selector = selector1 & 0xffff;
f0967a1a 2451 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2452 if ((selector & 0xfffc) == 0) {
dc1ded53 2453 goto fail;
20054ef0 2454 }
100ec099 2455 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2456 goto fail;
20054ef0 2457 }
eaa728ee
FB
2458 rpl = selector & 3;
2459 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2460 cpl = env->hflags & HF_CPL_MASK;
2461 if (e2 & DESC_S_MASK) {
2462 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2463 /* conforming */
2464 } else {
20054ef0 2465 if (dpl < cpl || dpl < rpl) {
eaa728ee 2466 goto fail;
20054ef0 2467 }
eaa728ee
FB
2468 }
2469 } else {
2470 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2471 switch (type) {
eaa728ee
FB
2472 case 1:
2473 case 2:
2474 case 3:
2475 case 9:
2476 case 11:
2477 break;
2478 default:
2479 goto fail;
2480 }
2481 if (dpl < cpl || dpl < rpl) {
2482 fail:
2483 CC_SRC = eflags & ~CC_Z;
2484 return 0;
2485 }
2486 }
2487 limit = get_seg_limit(e1, e2);
2488 CC_SRC = eflags | CC_Z;
2489 return limit;
2490}
2491
2999a0b2 2492target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2493{
2494 uint32_t e1, e2, eflags, selector;
2495 int rpl, dpl, cpl, type;
2496
2497 selector = selector1 & 0xffff;
f0967a1a 2498 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2499 if ((selector & 0xfffc) == 0) {
eaa728ee 2500 goto fail;
20054ef0 2501 }
100ec099 2502 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2503 goto fail;
20054ef0 2504 }
eaa728ee
FB
2505 rpl = selector & 3;
2506 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2507 cpl = env->hflags & HF_CPL_MASK;
2508 if (e2 & DESC_S_MASK) {
2509 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2510 /* conforming */
2511 } else {
20054ef0 2512 if (dpl < cpl || dpl < rpl) {
eaa728ee 2513 goto fail;
20054ef0 2514 }
eaa728ee
FB
2515 }
2516 } else {
2517 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2518 switch (type) {
eaa728ee
FB
2519 case 1:
2520 case 2:
2521 case 3:
2522 case 4:
2523 case 5:
2524 case 9:
2525 case 11:
2526 case 12:
2527 break;
2528 default:
2529 goto fail;
2530 }
2531 if (dpl < cpl || dpl < rpl) {
2532 fail:
2533 CC_SRC = eflags & ~CC_Z;
2534 return 0;
2535 }
2536 }
2537 CC_SRC = eflags | CC_Z;
2538 return e2 & 0x00f0ff00;
2539}
2540
2999a0b2 2541void helper_verr(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2542{
2543 uint32_t e1, e2, eflags, selector;
2544 int rpl, dpl, cpl;
2545
2546 selector = selector1 & 0xffff;
f0967a1a 2547 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2548 if ((selector & 0xfffc) == 0) {
eaa728ee 2549 goto fail;
20054ef0 2550 }
100ec099 2551 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2552 goto fail;
20054ef0
BS
2553 }
2554 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2555 goto fail;
20054ef0 2556 }
eaa728ee
FB
2557 rpl = selector & 3;
2558 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2559 cpl = env->hflags & HF_CPL_MASK;
2560 if (e2 & DESC_CS_MASK) {
20054ef0 2561 if (!(e2 & DESC_R_MASK)) {
eaa728ee 2562 goto fail;
20054ef0 2563 }
eaa728ee 2564 if (!(e2 & DESC_C_MASK)) {
20054ef0 2565 if (dpl < cpl || dpl < rpl) {
eaa728ee 2566 goto fail;
20054ef0 2567 }
eaa728ee
FB
2568 }
2569 } else {
2570 if (dpl < cpl || dpl < rpl) {
2571 fail:
2572 CC_SRC = eflags & ~CC_Z;
2573 return;
2574 }
2575 }
2576 CC_SRC = eflags | CC_Z;
2577}
2578
2999a0b2 2579void helper_verw(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2580{
2581 uint32_t e1, e2, eflags, selector;
2582 int rpl, dpl, cpl;
2583
2584 selector = selector1 & 0xffff;
f0967a1a 2585 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2586 if ((selector & 0xfffc) == 0) {
eaa728ee 2587 goto fail;
20054ef0 2588 }
100ec099 2589 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2590 goto fail;
20054ef0
BS
2591 }
2592 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2593 goto fail;
20054ef0 2594 }
eaa728ee
FB
2595 rpl = selector & 3;
2596 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2597 cpl = env->hflags & HF_CPL_MASK;
2598 if (e2 & DESC_CS_MASK) {
2599 goto fail;
2600 } else {
20054ef0 2601 if (dpl < cpl || dpl < rpl) {
eaa728ee 2602 goto fail;
20054ef0 2603 }
eaa728ee
FB
2604 if (!(e2 & DESC_W_MASK)) {
2605 fail:
2606 CC_SRC = eflags & ~CC_Z;
2607 return;
2608 }
2609 }
2610 CC_SRC = eflags | CC_Z;
2611}
2612
#if defined(CONFIG_USER_ONLY)
/* Load a segment register on behalf of user-mode emulation, picking the
   real/vm86 shortcut or the full protected-mode descriptor load. */
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if ((env->cr[0] & CR0_PE_MASK) && !(env->eflags & VM_MASK)) {
        /* protected mode: go through the descriptor tables */
        helper_load_seg(env, seg_reg, selector);
        return;
    }
    /* real or vm86 mode: base is selector << 4; vm86 runs at DPL 3 */
    int dpl = (env->eflags & VM_MASK) ? 3 : 0;
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg_reg, selector,
                           (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
}
#endif
81cf8d8a
PB
2628
2629/* check if Port I/O is allowed in TSS */
100ec099
PD
2630static inline void check_io(CPUX86State *env, int addr, int size,
2631 uintptr_t retaddr)
81cf8d8a
PB
2632{
2633 int io_offset, val, mask;
2634
2635 /* TSS must be a valid 32 bit one */
2636 if (!(env->tr.flags & DESC_P_MASK) ||
2637 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2638 env->tr.limit < 103) {
2639 goto fail;
2640 }
100ec099 2641 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
81cf8d8a
PB
2642 io_offset += (addr >> 3);
2643 /* Note: the check needs two bytes */
2644 if ((io_offset + 1) > env->tr.limit) {
2645 goto fail;
2646 }
100ec099 2647 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
81cf8d8a
PB
2648 val >>= (addr & 7);
2649 mask = (1 << size) - 1;
2650 /* all bits must be zero to allow the I/O */
2651 if ((val & mask) != 0) {
2652 fail:
100ec099 2653 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
81cf8d8a
PB
2654 }
2655}
2656
/* I/O permission check for a byte-sized port access */
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}
2661
/* I/O permission check for a word-sized port access */
void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}
2666
/* I/O permission check for a dword-sized port access */
void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}