Source: git.proxmox.com mirror of QEMU (mirror_qemu.git), blame view of target/i386/seg_helper.c,
as of commit "target-i386: correctly propagate retaddr into SVM helpers".
eaa728ee 1/*
10774999
BS
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
eaa728ee
FB
4 *
5 * Copyright (c) 2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
eaa728ee 19 */
83dae095 20
b6a0aa05 21#include "qemu/osdep.h"
3e457172 22#include "cpu.h"
1de7afc9 23#include "qemu/log.h"
2ef6175a 24#include "exec/helper-proto.h"
63c91552 25#include "exec/exec-all.h"
f08b6170 26#include "exec/cpu_ldst.h"
508127e2 27#include "exec/log.h"
eaa728ee 28
//#define DEBUG_PCALL

/* Verbose tracing of protected-mode control transfers (calls, interrupts,
   task switches).  Compiled out entirely unless DEBUG_PCALL is defined. */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

/* Instantiate the cpu_{ld,st}{ub,uw,l,q}_kernel(_ra) accessors used below.
   These read/write guest memory with kernel (CPL 0) privilege; the _ra
   variants take a host return address so a fault can unwind precisely to
   the guest instruction.  In user-only mode the plain useronly template is
   used instead, since there is no MMU index to select. */
#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif

/* return non zero if error */
/* Fetch the 8-byte descriptor for 'selector' from the GDT or LDT into
   *e1_ptr (low word) and *e2_ptr (high word).  Returns -1 if the selector
   index lies beyond the table limit; raises no exception itself. */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    /* TI bit (bit 2) selects LDT vs GDT. */
    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}
95
/* Convenience wrapper around load_segment_ra() with no unwind address
   (retaddr == 0 means "no guest-instruction restart needed"). */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
101
eaa728ee
FB
102static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
103{
104 unsigned int limit;
20054ef0 105
eaa728ee 106 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
20054ef0 107 if (e2 & DESC_G_MASK) {
eaa728ee 108 limit = (limit << 12) | 0xfff;
20054ef0 109 }
eaa728ee
FB
110 return limit;
111}
112
/* Assemble the 32-bit segment base address from descriptor words e1/e2:
   base[15:0] in e1[31:16], base[23:16] in e2[7:0], base[31:24] in e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;

    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
117
/* Fill a SegmentCache directly from raw descriptor words, with no
   permission or presence checks. */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
125
/* init the segment cache in vm86 mode. */
/* In vm86 mode a segment's base is simply selector << 4, the limit is
   64K, and the segment is a present, writable, accessed data segment at
   DPL 3 — no descriptor table lookup is involved. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
135
/* Read the inner-privilege stack pointer (SS:ESP) for privilege level
   'dpl' from the current TSS.  Handles both 16-bit (shift == 0) and
   32-bit (shift == 1) TSS layouts; raises #TS if the stack slot lies
   beyond the TSS limit. */
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    /* TSS descriptor types have (type & 7) == 1; anything else is a bug
       in the guest-visible state we cannot recover from. */
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    /* Bit 3 of the type distinguishes a 32-bit TSS (entries twice as
       wide, hence the shift). */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
177
/* Load segment register 'seg_reg' with 'selector' during a task switch,
   performing the per-register access checks.  Any violation raises #TS
   (or #NP for a non-present segment) with the faulting selector as the
   error code, so the check order below is architecturally significant. */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        /* must be a code or data (S) descriptor, not a system one */
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* A null selector is only legal for data segment registers. */
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
233
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by 'tss_selector'
   (descriptor words e1/e2).  'source' says whether we got here via JMP,
   IRET or CALL/interrupt, which controls busy-bit and NT-flag handling.
   Exceptions raised before the new state is committed restart the old
   task; the memory-probe dance below tries to front-load the faults. */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        /* the target TSS must live in the GDT */
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    /* type bit 3: 32-bit TSS (104 bytes) vs 16-bit TSS (44 bytes) */
    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* back-link to the old task, and mark the transfer as nested */
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        /* must be an LDT system descriptor (type 2) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}
511
/* Task switch without a host return address (retaddr == 0): used from
   paths that are not restartable mid-instruction. */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
518
eaa728ee
FB
519static inline unsigned int get_sp_mask(unsigned int e2)
520{
20054ef0 521 if (e2 & DESC_B_MASK) {
eaa728ee 522 return 0xffffffff;
20054ef0 523 } else {
eaa728ee 524 return 0xffff;
20054ef0 525 }
eaa728ee
FB
526}
527
/* Whether the CPU pushes an error code for exception vector 'intno'.
   True for #DF(8), #TS(10), #NP(11), #SS(12), #GP(13), #PF(14), #AC(17). */
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    default:
        return 0;
    }
}
542
#ifdef TARGET_X86_64
/* Write 'val' into ESP under 'sp_mask'.  On x86_64 the common masks get
   special cases so the high 32 bits of RSP are cleared (32-bit mask) or
   preserved correctly (16-bit mask). */
#define SET_ESP(val, sp_mask) \
    do { \
        if ((sp_mask) == 0xffff) { \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
                ((val) & 0xffff); \
        } else if ((sp_mask) == 0xffffffffLL) { \
            env->regs[R_ESP] = (uint32_t)(val); \
        } else { \
            env->regs[R_ESP] = (val); \
        } \
    } while (0)
#else
#define SET_ESP(val, sp_mask) \
    do { \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
            ((val) & (sp_mask)); \
    } while (0)
#endif
562
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
/* 16-bit push: decrement sp, then store through segment base 'ssp'. */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra) \
    { \
        sp -= 2; \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

/* 32-bit push: same, but the linear address is clamped to 32 bits. */
#define PUSHL_RA(ssp, sp, sp_mask, val, ra) \
    { \
        sp -= 4; \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

/* 16-bit pop: load through segment base, then increment sp. */
#define POPW_RA(ssp, sp, sp_mask, val, ra) \
    { \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2; \
    }

/* 32-bit pop, with the linear address clamped to 32 bits. */
#define POPL_RA(ssp, sp, sp_mask, val, ra) \
    { \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4; \
    }

/* Variants with no unwind address. */
#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
596
/* protected mode interrupt */
/* Deliver exception/interrupt 'intno' in protected mode.  'is_int' marks a
   software INT n (next_eip is then the return address), 'is_hw' a hardware
   interrupt; error_code is pushed only for exceptions that define one.
   Handles task gates (via switch_tss), 286/386 interrupt and trap gates,
   and inner-privilege stack switches including the vm86 transition. */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            /* ... on the new task's stack, using the new TSS's width */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* gate type bit 3 selects 32-bit (shift 1) vs 16-bit (shift 0) frame */
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            /* leaving vm86: data segment registers are cleared */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
812
#ifdef TARGET_X86_64

/* 64-bit stack push: flat address space, no segment base or sp mask. */
#define PUSHQ_RA(sp, val, ra) \
    { \
        sp -= 8; \
        cpu_stq_kernel_ra(env, sp, (val), ra); \
    }

/* 64-bit stack pop. */
#define POPQ_RA(sp, val, ra) \
    { \
        val = cpu_ldq_kernel_ra(env, sp, ra); \
        sp += 8; \
    }

/* Variants with no unwind address. */
#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
829
/* Read the 64-bit RSP for privilege 'level' from the current (64-bit)
   TSS; raises #TS if the slot lies beyond the TSS limit. */
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    /* 64-bit TSS layout: RSP0..RSP2 are 8-byte slots starting at offset 4 */
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
849
850/* 64 bit interrupt */
2999a0b2
BS
851static void do_interrupt64(CPUX86State *env, int intno, int is_int,
852 int error_code, target_ulong next_eip, int is_hw)
eaa728ee
FB
853{
854 SegmentCache *dt;
855 target_ulong ptr;
856 int type, dpl, selector, cpl, ist;
857 int has_error_code, new_stack;
858 uint32_t e1, e2, e3, ss;
859 target_ulong old_eip, esp, offset;
eaa728ee 860
eaa728ee 861 has_error_code = 0;
20054ef0
BS
862 if (!is_int && !is_hw) {
863 has_error_code = exception_has_error_code(intno);
864 }
865 if (is_int) {
eaa728ee 866 old_eip = next_eip;
20054ef0 867 } else {
eaa728ee 868 old_eip = env->eip;
20054ef0 869 }
eaa728ee
FB
870
871 dt = &env->idt;
20054ef0 872 if (intno * 16 + 15 > dt->limit) {
77b2bc2c 873 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
20054ef0 874 }
eaa728ee 875 ptr = dt->base + intno * 16;
329e607d
BS
876 e1 = cpu_ldl_kernel(env, ptr);
877 e2 = cpu_ldl_kernel(env, ptr + 4);
878 e3 = cpu_ldl_kernel(env, ptr + 8);
eaa728ee
FB
879 /* check gate type */
880 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
20054ef0 881 switch (type) {
eaa728ee
FB
882 case 14: /* 386 interrupt gate */
883 case 15: /* 386 trap gate */
884 break;
885 default:
77b2bc2c 886 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
eaa728ee
FB
887 break;
888 }
889 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
890 cpl = env->hflags & HF_CPL_MASK;
1235fc06 891 /* check privilege if software int */
20054ef0 892 if (is_int && dpl < cpl) {
77b2bc2c 893 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
20054ef0 894 }
eaa728ee 895 /* check valid bit */
20054ef0 896 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 897 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
20054ef0 898 }
eaa728ee
FB
899 selector = e1 >> 16;
900 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
901 ist = e2 & 7;
20054ef0 902 if ((selector & 0xfffc) == 0) {
77b2bc2c 903 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 904 }
eaa728ee 905
2999a0b2 906 if (load_segment(env, &e1, &e2, selector) != 0) {
77b2bc2c 907 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
908 }
909 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 910 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 911 }
eaa728ee 912 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 913 if (dpl > cpl) {
77b2bc2c 914 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
915 }
916 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 917 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0
BS
918 }
919 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
77b2bc2c 920 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 921 }
eaa728ee
FB
922 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
923 /* to inner privilege */
eaa728ee 924 new_stack = 1;
ae67dc72
PB
925 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
926 ss = 0;
eaa728ee
FB
927 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
928 /* to same privilege */
20054ef0 929 if (env->eflags & VM_MASK) {
77b2bc2c 930 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 931 }
eaa728ee 932 new_stack = 0;
ae67dc72 933 esp = env->regs[R_ESP];
eaa728ee
FB
934 dpl = cpl;
935 } else {
77b2bc2c 936 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
eaa728ee
FB
937 new_stack = 0; /* avoid warning */
938 esp = 0; /* avoid warning */
939 }
ae67dc72 940 esp &= ~0xfLL; /* align stack */
eaa728ee
FB
941
942 PUSHQ(esp, env->segs[R_SS].selector);
08b3ded6 943 PUSHQ(esp, env->regs[R_ESP]);
997ff0d9 944 PUSHQ(esp, cpu_compute_eflags(env));
eaa728ee
FB
945 PUSHQ(esp, env->segs[R_CS].selector);
946 PUSHQ(esp, old_eip);
947 if (has_error_code) {
948 PUSHQ(esp, error_code);
949 }
950
fd460606
KC
951 /* interrupt gate clear IF mask */
952 if ((type & 1) == 0) {
953 env->eflags &= ~IF_MASK;
954 }
955 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
956
eaa728ee
FB
957 if (new_stack) {
958 ss = 0 | dpl;
959 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
960 }
08b3ded6 961 env->regs[R_ESP] = esp;
eaa728ee
FB
962
963 selector = (selector & ~3) | dpl;
964 cpu_x86_load_seg_cache(env, R_CS, selector,
965 get_seg_base(e1, e2),
966 get_seg_limit(e1, e2),
967 e2);
eaa728ee 968 env->eip = offset;
eaa728ee
FB
969}
970#endif
971
d9957a8b 972#ifdef TARGET_X86_64
eaa728ee 973#if defined(CONFIG_USER_ONLY)
2999a0b2 974void helper_syscall(CPUX86State *env, int next_eip_addend)
eaa728ee 975{
27103424
AF
976 CPUState *cs = CPU(x86_env_get_cpu(env));
977
978 cs->exception_index = EXCP_SYSCALL;
eaa728ee 979 env->exception_next_eip = env->eip + next_eip_addend;
5638d180 980 cpu_loop_exit(cs);
eaa728ee
FB
981}
982#else
2999a0b2 983void helper_syscall(CPUX86State *env, int next_eip_addend)
eaa728ee
FB
984{
985 int selector;
986
987 if (!(env->efer & MSR_EFER_SCE)) {
100ec099 988 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
eaa728ee
FB
989 }
990 selector = (env->star >> 32) & 0xffff;
eaa728ee
FB
991 if (env->hflags & HF_LMA_MASK) {
992 int code64;
993
a4165610 994 env->regs[R_ECX] = env->eip + next_eip_addend;
997ff0d9 995 env->regs[11] = cpu_compute_eflags(env);
eaa728ee
FB
996
997 code64 = env->hflags & HF_CS64_MASK;
998
fd460606
KC
999 env->eflags &= ~env->fmask;
1000 cpu_load_eflags(env, env->eflags, 0);
eaa728ee
FB
1001 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1002 0, 0xffffffff,
1003 DESC_G_MASK | DESC_P_MASK |
1004 DESC_S_MASK |
20054ef0
BS
1005 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1006 DESC_L_MASK);
eaa728ee
FB
1007 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1008 0, 0xffffffff,
1009 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1010 DESC_S_MASK |
1011 DESC_W_MASK | DESC_A_MASK);
20054ef0 1012 if (code64) {
eaa728ee 1013 env->eip = env->lstar;
20054ef0 1014 } else {
eaa728ee 1015 env->eip = env->cstar;
20054ef0 1016 }
d9957a8b 1017 } else {
a4165610 1018 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
eaa728ee 1019
fd460606 1020 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
eaa728ee
FB
1021 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1022 0, 0xffffffff,
1023 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1024 DESC_S_MASK |
1025 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1026 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1027 0, 0xffffffff,
1028 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1029 DESC_S_MASK |
1030 DESC_W_MASK | DESC_A_MASK);
eaa728ee
FB
1031 env->eip = (uint32_t)env->star;
1032 }
1033}
1034#endif
d9957a8b 1035#endif
eaa728ee 1036
d9957a8b 1037#ifdef TARGET_X86_64
2999a0b2 1038void helper_sysret(CPUX86State *env, int dflag)
eaa728ee
FB
1039{
1040 int cpl, selector;
1041
1042 if (!(env->efer & MSR_EFER_SCE)) {
100ec099 1043 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
eaa728ee
FB
1044 }
1045 cpl = env->hflags & HF_CPL_MASK;
1046 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
100ec099 1047 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
eaa728ee
FB
1048 }
1049 selector = (env->star >> 48) & 0xffff;
eaa728ee 1050 if (env->hflags & HF_LMA_MASK) {
fd460606
KC
1051 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1052 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1053 NT_MASK);
eaa728ee
FB
1054 if (dflag == 2) {
1055 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1056 0, 0xffffffff,
1057 DESC_G_MASK | DESC_P_MASK |
1058 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1059 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1060 DESC_L_MASK);
a4165610 1061 env->eip = env->regs[R_ECX];
eaa728ee
FB
1062 } else {
1063 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1064 0, 0xffffffff,
1065 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1066 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1067 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
a4165610 1068 env->eip = (uint32_t)env->regs[R_ECX];
eaa728ee 1069 }
ac576229 1070 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
eaa728ee
FB
1071 0, 0xffffffff,
1072 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1073 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1074 DESC_W_MASK | DESC_A_MASK);
d9957a8b 1075 } else {
fd460606 1076 env->eflags |= IF_MASK;
eaa728ee
FB
1077 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1078 0, 0xffffffff,
1079 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
a4165610 1082 env->eip = (uint32_t)env->regs[R_ECX];
ac576229 1083 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
eaa728ee
FB
1084 0, 0xffffffff,
1085 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1086 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1087 DESC_W_MASK | DESC_A_MASK);
eaa728ee 1088 }
eaa728ee 1089}
d9957a8b 1090#endif
eaa728ee
FB
1091
1092/* real mode interrupt */
2999a0b2
BS
1093static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1094 int error_code, unsigned int next_eip)
eaa728ee
FB
1095{
1096 SegmentCache *dt;
1097 target_ulong ptr, ssp;
1098 int selector;
1099 uint32_t offset, esp;
1100 uint32_t old_cs, old_eip;
eaa728ee 1101
20054ef0 1102 /* real mode (simpler!) */
eaa728ee 1103 dt = &env->idt;
20054ef0 1104 if (intno * 4 + 3 > dt->limit) {
77b2bc2c 1105 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 1106 }
eaa728ee 1107 ptr = dt->base + intno * 4;
329e607d
BS
1108 offset = cpu_lduw_kernel(env, ptr);
1109 selector = cpu_lduw_kernel(env, ptr + 2);
08b3ded6 1110 esp = env->regs[R_ESP];
eaa728ee 1111 ssp = env->segs[R_SS].base;
20054ef0 1112 if (is_int) {
eaa728ee 1113 old_eip = next_eip;
20054ef0 1114 } else {
eaa728ee 1115 old_eip = env->eip;
20054ef0 1116 }
eaa728ee 1117 old_cs = env->segs[R_CS].selector;
20054ef0 1118 /* XXX: use SS segment size? */
997ff0d9 1119 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
eaa728ee
FB
1120 PUSHW(ssp, esp, 0xffff, old_cs);
1121 PUSHW(ssp, esp, 0xffff, old_eip);
1122
1123 /* update processor state */
08b3ded6 1124 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
eaa728ee
FB
1125 env->eip = offset;
1126 env->segs[R_CS].selector = selector;
1127 env->segs[R_CS].base = (selector << 4);
1128 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1129}
1130
e694d4e2 1131#if defined(CONFIG_USER_ONLY)
33271823
PM
1132/* fake user mode interrupt. is_int is TRUE if coming from the int
1133 * instruction. next_eip is the env->eip value AFTER the interrupt
1134 * instruction. It is only relevant if is_int is TRUE or if intno
1135 * is EXCP_SYSCALL.
1136 */
2999a0b2
BS
1137static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1138 int error_code, target_ulong next_eip)
eaa728ee 1139{
885b7c44
SS
1140 if (is_int) {
1141 SegmentCache *dt;
1142 target_ulong ptr;
1143 int dpl, cpl, shift;
1144 uint32_t e2;
eaa728ee 1145
885b7c44
SS
1146 dt = &env->idt;
1147 if (env->hflags & HF_LMA_MASK) {
1148 shift = 4;
1149 } else {
1150 shift = 3;
1151 }
1152 ptr = dt->base + (intno << shift);
1153 e2 = cpu_ldl_kernel(env, ptr + 4);
eaa728ee 1154
885b7c44
SS
1155 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1156 cpl = env->hflags & HF_CPL_MASK;
1157 /* check privilege if software int */
1158 if (dpl < cpl) {
1159 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
1160 }
20054ef0 1161 }
eaa728ee
FB
1162
1163 /* Since we emulate only user space, we cannot do more than
1164 exiting the emulation with the suitable exception and error
47575997
JM
1165 code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
1166 if (is_int || intno == EXCP_SYSCALL) {
a78d0eab 1167 env->eip = next_eip;
20054ef0 1168 }
eaa728ee
FB
1169}
1170
e694d4e2
BS
1171#else
1172
2999a0b2
BS
1173static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1174 int error_code, int is_hw, int rm)
2ed51f5b 1175{
19d6ca16 1176 CPUState *cs = CPU(x86_env_get_cpu(env));
b216aa6c 1177 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
20054ef0
BS
1178 control.event_inj));
1179
2ed51f5b 1180 if (!(event_inj & SVM_EVTINJ_VALID)) {
20054ef0
BS
1181 int type;
1182
1183 if (is_int) {
1184 type = SVM_EVTINJ_TYPE_SOFT;
1185 } else {
1186 type = SVM_EVTINJ_TYPE_EXEPT;
1187 }
1188 event_inj = intno | type | SVM_EVTINJ_VALID;
1189 if (!rm && exception_has_error_code(intno)) {
1190 event_inj |= SVM_EVTINJ_VALID_ERR;
b216aa6c 1191 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
20054ef0
BS
1192 control.event_inj_err),
1193 error_code);
1194 }
b216aa6c 1195 x86_stl_phys(cs,
ab1da857 1196 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
20054ef0 1197 event_inj);
2ed51f5b
AL
1198 }
1199}
00ea18d1 1200#endif
2ed51f5b 1201
eaa728ee
FB
1202/*
1203 * Begin execution of an interruption. is_int is TRUE if coming from
a78d0eab 1204 * the int instruction. next_eip is the env->eip value AFTER the interrupt
eaa728ee
FB
1205 * instruction. It is only relevant if is_int is TRUE.
1206 */
ca4c810a 1207static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
2999a0b2 1208 int error_code, target_ulong next_eip, int is_hw)
eaa728ee 1209{
ca4c810a
AF
1210 CPUX86State *env = &cpu->env;
1211
8fec2b8c 1212 if (qemu_loglevel_mask(CPU_LOG_INT)) {
eaa728ee
FB
1213 if ((env->cr[0] & CR0_PE_MASK)) {
1214 static int count;
20054ef0
BS
1215
1216 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1217 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1218 count, intno, error_code, is_int,
1219 env->hflags & HF_CPL_MASK,
a78d0eab
LG
1220 env->segs[R_CS].selector, env->eip,
1221 (int)env->segs[R_CS].base + env->eip,
08b3ded6 1222 env->segs[R_SS].selector, env->regs[R_ESP]);
eaa728ee 1223 if (intno == 0x0e) {
93fcfe39 1224 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
eaa728ee 1225 } else {
4b34e3ad 1226 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
eaa728ee 1227 }
93fcfe39 1228 qemu_log("\n");
a0762859 1229 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
eaa728ee
FB
1230#if 0
1231 {
1232 int i;
9bd5494e 1233 target_ulong ptr;
20054ef0 1234
93fcfe39 1235 qemu_log(" code=");
eaa728ee 1236 ptr = env->segs[R_CS].base + env->eip;
20054ef0 1237 for (i = 0; i < 16; i++) {
93fcfe39 1238 qemu_log(" %02x", ldub(ptr + i));
eaa728ee 1239 }
93fcfe39 1240 qemu_log("\n");
eaa728ee
FB
1241 }
1242#endif
1243 count++;
1244 }
1245 }
1246 if (env->cr[0] & CR0_PE_MASK) {
00ea18d1 1247#if !defined(CONFIG_USER_ONLY)
20054ef0 1248 if (env->hflags & HF_SVMI_MASK) {
2999a0b2 1249 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
20054ef0 1250 }
00ea18d1 1251#endif
eb38c52c 1252#ifdef TARGET_X86_64
eaa728ee 1253 if (env->hflags & HF_LMA_MASK) {
2999a0b2 1254 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
eaa728ee
FB
1255 } else
1256#endif
1257 {
2999a0b2
BS
1258 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1259 is_hw);
eaa728ee
FB
1260 }
1261 } else {
00ea18d1 1262#if !defined(CONFIG_USER_ONLY)
20054ef0 1263 if (env->hflags & HF_SVMI_MASK) {
2999a0b2 1264 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
20054ef0 1265 }
00ea18d1 1266#endif
2999a0b2 1267 do_interrupt_real(env, intno, is_int, error_code, next_eip);
eaa728ee 1268 }
2ed51f5b 1269
00ea18d1 1270#if !defined(CONFIG_USER_ONLY)
2ed51f5b 1271 if (env->hflags & HF_SVMI_MASK) {
fdfba1a2 1272 CPUState *cs = CPU(cpu);
b216aa6c 1273 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
20054ef0
BS
1274 offsetof(struct vmcb,
1275 control.event_inj));
1276
b216aa6c 1277 x86_stl_phys(cs,
ab1da857 1278 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
20054ef0 1279 event_inj & ~SVM_EVTINJ_VALID);
2ed51f5b 1280 }
00ea18d1 1281#endif
eaa728ee
FB
1282}
1283
97a8ea5a 1284void x86_cpu_do_interrupt(CPUState *cs)
e694d4e2 1285{
97a8ea5a
AF
1286 X86CPU *cpu = X86_CPU(cs);
1287 CPUX86State *env = &cpu->env;
1288
e694d4e2
BS
1289#if defined(CONFIG_USER_ONLY)
1290 /* if user mode only, we simulate a fake exception
1291 which will be handled outside the cpu execution
1292 loop */
27103424 1293 do_interrupt_user(env, cs->exception_index,
e694d4e2
BS
1294 env->exception_is_int,
1295 env->error_code,
1296 env->exception_next_eip);
1297 /* successfully delivered */
1298 env->old_exception = -1;
1299#else
1300 /* simulate a real cpu exception. On i386, it can
1301 trigger new exceptions, but we do not handle
1302 double or triple faults yet. */
27103424 1303 do_interrupt_all(cpu, cs->exception_index,
e694d4e2
BS
1304 env->exception_is_int,
1305 env->error_code,
1306 env->exception_next_eip, 0);
1307 /* successfully delivered */
1308 env->old_exception = -1;
1309#endif
e694d4e2
BS
1310}
1311
2999a0b2 1312void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
e694d4e2 1313{
ca4c810a 1314 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
e694d4e2
BS
1315}
1316
42f53fea
RH
1317bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1318{
1319 X86CPU *cpu = X86_CPU(cs);
1320 CPUX86State *env = &cpu->env;
1321 bool ret = false;
1322
1323#if !defined(CONFIG_USER_ONLY)
1324 if (interrupt_request & CPU_INTERRUPT_POLL) {
1325 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1326 apic_poll_irq(cpu->apic_state);
a4fc3212
PD
1327 /* Don't process multiple interrupt requests in a single call.
1328 This is required to make icount-driven execution deterministic. */
1329 return true;
42f53fea
RH
1330 }
1331#endif
1332 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1333 do_cpu_sipi(cpu);
d718b14b 1334 ret = true;
42f53fea
RH
1335 } else if (env->hflags2 & HF2_GIF_MASK) {
1336 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1337 !(env->hflags & HF_SMM_MASK)) {
65c9d60a 1338 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
42f53fea
RH
1339 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1340 do_smm_enter(cpu);
1341 ret = true;
1342 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1343 !(env->hflags2 & HF2_NMI_MASK)) {
1344 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1345 env->hflags2 |= HF2_NMI_MASK;
1346 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1347 ret = true;
1348 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1349 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1350 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1351 ret = true;
1352 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1353 (((env->hflags2 & HF2_VINTR_MASK) &&
1354 (env->hflags2 & HF2_HIF_MASK)) ||
1355 (!(env->hflags2 & HF2_VINTR_MASK) &&
1356 (env->eflags & IF_MASK &&
1357 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1358 int intno;
65c9d60a 1359 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
42f53fea
RH
1360 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1361 CPU_INTERRUPT_VIRQ);
1362 intno = cpu_get_pic_interrupt(env);
1363 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1364 "Servicing hardware INT=0x%02x\n", intno);
1365 do_interrupt_x86_hardirq(env, intno, 1);
1366 /* ensure that no TB jump will be modified as
1367 the program flow was changed */
1368 ret = true;
1369#if !defined(CONFIG_USER_ONLY)
1370 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1371 (env->eflags & IF_MASK) &&
1372 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1373 int intno;
1374 /* FIXME: this should respect TPR */
65c9d60a 1375 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
b216aa6c 1376 intno = x86_ldl_phys(cs, env->vm_vmcb
42f53fea
RH
1377 + offsetof(struct vmcb, control.int_vector));
1378 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1379 "Servicing virtual hardware INT=0x%02x\n", intno);
1380 do_interrupt_x86_hardirq(env, intno, 1);
1381 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1382 ret = true;
1383#endif
1384 }
1385 }
1386
1387 return ret;
1388}
1389
2999a0b2 1390void helper_lldt(CPUX86State *env, int selector)
eaa728ee
FB
1391{
1392 SegmentCache *dt;
1393 uint32_t e1, e2;
1394 int index, entry_limit;
1395 target_ulong ptr;
1396
1397 selector &= 0xffff;
1398 if ((selector & 0xfffc) == 0) {
1399 /* XXX: NULL selector case: invalid LDT */
1400 env->ldt.base = 0;
1401 env->ldt.limit = 0;
1402 } else {
20054ef0 1403 if (selector & 0x4) {
100ec099 1404 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1405 }
eaa728ee
FB
1406 dt = &env->gdt;
1407 index = selector & ~7;
1408#ifdef TARGET_X86_64
20054ef0 1409 if (env->hflags & HF_LMA_MASK) {
eaa728ee 1410 entry_limit = 15;
20054ef0 1411 } else
eaa728ee 1412#endif
20054ef0 1413 {
eaa728ee 1414 entry_limit = 7;
20054ef0
BS
1415 }
1416 if ((index + entry_limit) > dt->limit) {
100ec099 1417 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1418 }
eaa728ee 1419 ptr = dt->base + index;
100ec099
PD
1420 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1421 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
20054ef0 1422 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
100ec099 1423 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0
BS
1424 }
1425 if (!(e2 & DESC_P_MASK)) {
100ec099 1426 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
20054ef0 1427 }
eaa728ee
FB
1428#ifdef TARGET_X86_64
1429 if (env->hflags & HF_LMA_MASK) {
1430 uint32_t e3;
20054ef0 1431
100ec099 1432 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
eaa728ee
FB
1433 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1434 env->ldt.base |= (target_ulong)e3 << 32;
1435 } else
1436#endif
1437 {
1438 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1439 }
1440 }
1441 env->ldt.selector = selector;
1442}
1443
2999a0b2 1444void helper_ltr(CPUX86State *env, int selector)
eaa728ee
FB
1445{
1446 SegmentCache *dt;
1447 uint32_t e1, e2;
1448 int index, type, entry_limit;
1449 target_ulong ptr;
1450
1451 selector &= 0xffff;
1452 if ((selector & 0xfffc) == 0) {
1453 /* NULL selector case: invalid TR */
1454 env->tr.base = 0;
1455 env->tr.limit = 0;
1456 env->tr.flags = 0;
1457 } else {
20054ef0 1458 if (selector & 0x4) {
100ec099 1459 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1460 }
eaa728ee
FB
1461 dt = &env->gdt;
1462 index = selector & ~7;
1463#ifdef TARGET_X86_64
20054ef0 1464 if (env->hflags & HF_LMA_MASK) {
eaa728ee 1465 entry_limit = 15;
20054ef0 1466 } else
eaa728ee 1467#endif
20054ef0 1468 {
eaa728ee 1469 entry_limit = 7;
20054ef0
BS
1470 }
1471 if ((index + entry_limit) > dt->limit) {
100ec099 1472 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1473 }
eaa728ee 1474 ptr = dt->base + index;
100ec099
PD
1475 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1476 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
eaa728ee
FB
1477 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1478 if ((e2 & DESC_S_MASK) ||
20054ef0 1479 (type != 1 && type != 9)) {
100ec099 1480 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0
BS
1481 }
1482 if (!(e2 & DESC_P_MASK)) {
100ec099 1483 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
20054ef0 1484 }
eaa728ee
FB
1485#ifdef TARGET_X86_64
1486 if (env->hflags & HF_LMA_MASK) {
1487 uint32_t e3, e4;
20054ef0 1488
100ec099
PD
1489 e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
1490 e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
20054ef0 1491 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
100ec099 1492 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1493 }
eaa728ee
FB
1494 load_seg_cache_raw_dt(&env->tr, e1, e2);
1495 env->tr.base |= (target_ulong)e3 << 32;
1496 } else
1497#endif
1498 {
1499 load_seg_cache_raw_dt(&env->tr, e1, e2);
1500 }
1501 e2 |= DESC_TSS_BUSY_MASK;
100ec099 1502 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
eaa728ee
FB
1503 }
1504 env->tr.selector = selector;
1505}
1506
1507/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2999a0b2 1508void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
eaa728ee
FB
1509{
1510 uint32_t e1, e2;
1511 int cpl, dpl, rpl;
1512 SegmentCache *dt;
1513 int index;
1514 target_ulong ptr;
1515
1516 selector &= 0xffff;
1517 cpl = env->hflags & HF_CPL_MASK;
1518 if ((selector & 0xfffc) == 0) {
1519 /* null selector case */
1520 if (seg_reg == R_SS
1521#ifdef TARGET_X86_64
1522 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1523#endif
20054ef0 1524 ) {
100ec099 1525 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1526 }
eaa728ee
FB
1527 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1528 } else {
1529
20054ef0 1530 if (selector & 0x4) {
eaa728ee 1531 dt = &env->ldt;
20054ef0 1532 } else {
eaa728ee 1533 dt = &env->gdt;
20054ef0 1534 }
eaa728ee 1535 index = selector & ~7;
20054ef0 1536 if ((index + 7) > dt->limit) {
100ec099 1537 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1538 }
eaa728ee 1539 ptr = dt->base + index;
100ec099
PD
1540 e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
1541 e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
eaa728ee 1542
20054ef0 1543 if (!(e2 & DESC_S_MASK)) {
100ec099 1544 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1545 }
eaa728ee
FB
1546 rpl = selector & 3;
1547 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1548 if (seg_reg == R_SS) {
1549 /* must be writable segment */
20054ef0 1550 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
100ec099 1551 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0
BS
1552 }
1553 if (rpl != cpl || dpl != cpl) {
100ec099 1554 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1555 }
eaa728ee
FB
1556 } else {
1557 /* must be readable segment */
20054ef0 1558 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
100ec099 1559 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1560 }
eaa728ee
FB
1561
1562 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1563 /* if not conforming code, test rights */
20054ef0 1564 if (dpl < cpl || dpl < rpl) {
100ec099 1565 raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
20054ef0 1566 }
eaa728ee
FB
1567 }
1568 }
1569
1570 if (!(e2 & DESC_P_MASK)) {
20054ef0 1571 if (seg_reg == R_SS) {
100ec099 1572 raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
20054ef0 1573 } else {
100ec099 1574 raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
20054ef0 1575 }
eaa728ee
FB
1576 }
1577
1578 /* set the access bit if not already set */
1579 if (!(e2 & DESC_A_MASK)) {
1580 e2 |= DESC_A_MASK;
100ec099 1581 cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
eaa728ee
FB
1582 }
1583
1584 cpu_x86_load_seg_cache(env, seg_reg, selector,
1585 get_seg_base(e1, e2),
1586 get_seg_limit(e1, e2),
1587 e2);
1588#if 0
93fcfe39 1589 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
eaa728ee
FB
1590 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1591#endif
1592 }
1593}
1594
1595/* protected mode jump */
2999a0b2 1596void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
100ec099 1597 target_ulong next_eip)
eaa728ee
FB
1598{
1599 int gate_cs, type;
1600 uint32_t e1, e2, cpl, dpl, rpl, limit;
eaa728ee 1601
20054ef0 1602 if ((new_cs & 0xfffc) == 0) {
100ec099 1603 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1604 }
100ec099
PD
1605 if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
1606 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1607 }
eaa728ee
FB
1608 cpl = env->hflags & HF_CPL_MASK;
1609 if (e2 & DESC_S_MASK) {
20054ef0 1610 if (!(e2 & DESC_CS_MASK)) {
100ec099 1611 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1612 }
eaa728ee
FB
1613 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1614 if (e2 & DESC_C_MASK) {
1615 /* conforming code segment */
20054ef0 1616 if (dpl > cpl) {
100ec099 1617 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1618 }
eaa728ee
FB
1619 } else {
1620 /* non conforming code segment */
1621 rpl = new_cs & 3;
20054ef0 1622 if (rpl > cpl) {
100ec099 1623 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0
BS
1624 }
1625 if (dpl != cpl) {
100ec099 1626 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1627 }
eaa728ee 1628 }
20054ef0 1629 if (!(e2 & DESC_P_MASK)) {
100ec099 1630 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
20054ef0 1631 }
eaa728ee
FB
1632 limit = get_seg_limit(e1, e2);
1633 if (new_eip > limit &&
20054ef0 1634 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
100ec099 1635 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1636 }
eaa728ee
FB
1637 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1638 get_seg_base(e1, e2), limit, e2);
a78d0eab 1639 env->eip = new_eip;
eaa728ee
FB
1640 } else {
1641 /* jump to call or task gate */
1642 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1643 rpl = new_cs & 3;
1644 cpl = env->hflags & HF_CPL_MASK;
1645 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 1646 switch (type) {
eaa728ee
FB
1647 case 1: /* 286 TSS */
1648 case 9: /* 386 TSS */
1649 case 5: /* task gate */
20054ef0 1650 if (dpl < cpl || dpl < rpl) {
100ec099 1651 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0 1652 }
100ec099 1653 switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
eaa728ee
FB
1654 break;
1655 case 4: /* 286 call gate */
1656 case 12: /* 386 call gate */
20054ef0 1657 if ((dpl < cpl) || (dpl < rpl)) {
100ec099 1658 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
20054ef0
BS
1659 }
1660 if (!(e2 & DESC_P_MASK)) {
100ec099 1661 raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
20054ef0 1662 }
eaa728ee
FB
1663 gate_cs = e1 >> 16;
1664 new_eip = (e1 & 0xffff);
20054ef0 1665 if (type == 12) {
eaa728ee 1666 new_eip |= (e2 & 0xffff0000);
20054ef0 1667 }
100ec099
PD
1668 if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
1669 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
20054ef0 1670 }
eaa728ee
FB
1671 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1672 /* must be code segment */
1673 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
20054ef0 1674 (DESC_S_MASK | DESC_CS_MASK))) {
100ec099 1675 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
20054ef0 1676 }
eaa728ee 1677 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
20054ef0 1678 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
100ec099 1679 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
20054ef0
BS
1680 }
1681 if (!(e2 & DESC_P_MASK)) {
100ec099 1682 raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
20054ef0 1683 }
eaa728ee 1684 limit = get_seg_limit(e1, e2);
20054ef0 1685 if (new_eip > limit) {
100ec099 1686 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 1687 }
eaa728ee
FB
1688 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1689 get_seg_base(e1, e2), limit, e2);
a78d0eab 1690 env->eip = new_eip;
eaa728ee
FB
1691 break;
1692 default:
100ec099 1693 raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
eaa728ee
FB
1694 break;
1695 }
1696 }
1697}
1698
1699/* real mode call */
2999a0b2 1700void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
eaa728ee
FB
1701 int shift, int next_eip)
1702{
1703 int new_eip;
1704 uint32_t esp, esp_mask;
1705 target_ulong ssp;
1706
1707 new_eip = new_eip1;
08b3ded6 1708 esp = env->regs[R_ESP];
eaa728ee
FB
1709 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1710 ssp = env->segs[R_SS].base;
1711 if (shift) {
100ec099
PD
1712 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1713 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
eaa728ee 1714 } else {
100ec099
PD
1715 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1716 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
eaa728ee
FB
1717 }
1718
1719 SET_ESP(esp, esp_mask);
1720 env->eip = new_eip;
1721 env->segs[R_CS].selector = new_cs;
1722 env->segs[R_CS].base = (new_cs << 4);
1723}
1724
/* protected mode call */
/*
 * Protected-mode far CALL.  @new_cs may name a code segment (direct call),
 * a TSS or task gate (task switch), or a call gate (possible privilege
 * change with stack switch and parameter copy).  @shift selects the
 * operand size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit); @next_eip is the
 * return address to push.  All faults are raised with GETPC() so the
 * guest instruction can be restarted precisely.
 */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp;

    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    /* a NULL CS selector is always a #GP(0) */
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* Direct call to a code segment: CPL is unchanged.  */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            /* target must lie inside the new CS limit */
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* System descriptor: TSS, task gate or call gate.  */
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        /* call gate: re-derive operand size from gate type (bit 3) */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        /* gate payload: target CS selector, entry offset, param count */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        /* note: e1/e2 are reused for the target CS descriptor from here on */
        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege: switch to the stack from the TSS and
               copy param_count parameters from the old stack */
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            /* new SS must be a present, writable data segment */
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                /* push old SS:ESP, then the copied parameters (32-bit) */
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                /* push old SS:SP, then the copied parameters (16-bit) */
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        /* finally push the return far pointer on the (possibly new) stack */
        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        /* CPL becomes the target DPL via the selector's RPL field */
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
1950
1951/* real and vm86 mode iret */
2999a0b2 1952void helper_iret_real(CPUX86State *env, int shift)
eaa728ee
FB
1953{
1954 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1955 target_ulong ssp;
1956 int eflags_mask;
1957
20054ef0 1958 sp_mask = 0xffff; /* XXXX: use SS segment size? */
08b3ded6 1959 sp = env->regs[R_ESP];
eaa728ee
FB
1960 ssp = env->segs[R_SS].base;
1961 if (shift == 1) {
1962 /* 32 bits */
100ec099
PD
1963 POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
1964 POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
eaa728ee 1965 new_cs &= 0xffff;
100ec099 1966 POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
eaa728ee
FB
1967 } else {
1968 /* 16 bits */
100ec099
PD
1969 POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
1970 POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
1971 POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
eaa728ee 1972 }
08b3ded6 1973 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
bdadc0b5 1974 env->segs[R_CS].selector = new_cs;
1975 env->segs[R_CS].base = (new_cs << 4);
eaa728ee 1976 env->eip = new_eip;
20054ef0
BS
1977 if (env->eflags & VM_MASK) {
1978 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
1979 NT_MASK;
1980 } else {
1981 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
1982 RF_MASK | NT_MASK;
1983 }
1984 if (shift == 0) {
eaa728ee 1985 eflags_mask &= 0xffff;
20054ef0 1986 }
997ff0d9 1987 cpu_load_eflags(env, new_eflags, eflags_mask);
db620f46 1988 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
1989}
1990
2999a0b2 1991static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
eaa728ee
FB
1992{
1993 int dpl;
1994 uint32_t e2;
1995
1996 /* XXX: on x86_64, we do not want to nullify FS and GS because
1997 they may still contain a valid base. I would be interested to
1998 know how a real x86_64 CPU behaves */
1999 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 2000 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2001 return;
20054ef0 2002 }
eaa728ee
FB
2003
2004 e2 = env->segs[seg_reg].flags;
2005 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2006 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2007 /* data or non conforming code segment */
2008 if (dpl < cpl) {
2009 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2010 }
2011 }
2012}
2013
/* protected mode iret */
/*
 * Common implementation of protected-mode far RET (@is_iret == 0) and
 * IRET (@is_iret == 1).  @shift is the operand size (0/1/2 for
 * 16/32/64-bit), @addend is the extra byte count RET n discards, and
 * @retaddr is the host return address used for precise fault reporting.
 * Handles same-privilege return, outer-privilege return with SS:ESP
 * reload and data-segment validation, and IRET back to vm86 mode.
 */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        /* 64-bit stack: no mask */
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    /* Pop the return frame: (R)IP, CS, and for IRET also (R)FLAGS.  */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    /* Validate the return CS descriptor.  */
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* returns can never raise privilege */
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
        /* Pop the outer SS:ESP saved when the inner level was entered.  */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            /* New SS must match the return RPL and be a present,
               writable data segment at that privilege level.  */
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* 32-bit IRET with VM set in the popped EFLAGS: pop the full vm86
       frame (ESP, SS, ES, DS, FS, GS) and enter vm86 mode.  */
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
2234
2999a0b2 2235void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
eaa728ee
FB
2236{
2237 int tss_selector, type;
2238 uint32_t e1, e2;
2239
2240 /* specific case for TSS */
2241 if (env->eflags & NT_MASK) {
2242#ifdef TARGET_X86_64
20054ef0 2243 if (env->hflags & HF_LMA_MASK) {
100ec099 2244 raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
20054ef0 2245 }
eaa728ee 2246#endif
100ec099 2247 tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
20054ef0 2248 if (tss_selector & 4) {
100ec099 2249 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2250 }
100ec099
PD
2251 if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
2252 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2253 }
eaa728ee
FB
2254 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2255 /* NOTE: we check both segment and busy TSS */
20054ef0 2256 if (type != 3) {
100ec099 2257 raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
20054ef0 2258 }
100ec099 2259 switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
eaa728ee 2260 } else {
100ec099 2261 helper_ret_protected(env, shift, 1, 0, GETPC());
eaa728ee 2262 }
db620f46 2263 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2264}
2265
/* Protected-mode far RET (and RET n, via @addend extra bytes to discard);
   thin wrapper over helper_ret_protected with is_iret == 0. */
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}
2270
/*
 * SYSENTER: fast transition to ring 0.  Loads CS/SS with flat
 * descriptors derived from the SYSENTER_CS MSR (CS = sysenter_cs,
 * SS = sysenter_cs + 8) and jumps to SYSENTER_EIP with the stack at
 * SYSENTER_ESP.  VM, IF and RF are cleared.  #GP(0) if SYSENTER_CS
 * has not been initialized.
 */
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: CS is a 64-bit code segment (L bit set) */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
2303
/*
 * SYSEXIT: fast return to ring 3.  Loads flat user CS/SS descriptors at
 * fixed offsets from SYSENTER_CS (16/24 for 32-bit, 32/40 for 64-bit
 * when @dflag == 2) and resumes at RDX with the stack at RCX.
 * #GP(0) if SYSENTER_CS is uninitialized or the caller is not ring 0.
 */
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit target: CS has the L bit, selectors carry RPL 3 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
2342
/*
 * LSL: load the segment limit of @selector1 if it is visible at the
 * current privilege level and of an allowed descriptor type.  On
 * success ZF is set (via CC_SRC) and the limit returned; on failure
 * ZF is cleared and 0 returned — no exception is raised.
 * Note the 'fail:' label sits inside the non-S branch but is reached
 * by goto from the earlier checks as well.
 */
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* non-conforming: DPL must dominate both CPL and RPL */
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        /* system descriptors: only TSS and LDT types have a limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
2390
/*
 * LAR: load the access-rights bytes of @selector1 if it is visible at
 * the current privilege level and of an allowed descriptor type.  On
 * success ZF is set (via CC_SRC) and the masked rights returned; on
 * failure ZF is cleared and 0 returned — no exception is raised.
 */
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* non-conforming: DPL must dominate both CPL and RPL */
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        /* LAR accepts more system types than LSL (gates included) */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    /* type, DPL, P plus the high attribute nibble */
    return e2 & 0x00f0ff00;
}
2439
/*
 * VERR: set ZF (via CC_SRC) if the segment named by @selector1 is
 * readable at the current privilege level, otherwise clear ZF.
 * Never raises an exception.
 */
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    /* system segments are never readable by VERR */
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segment: must be readable; privilege check skipped
           only for conforming segments */
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
2477
/*
 * VERW: set ZF (via CC_SRC) if the segment named by @selector1 is
 * writable at the current privilege level, otherwise clear ZF.
 * Never raises an exception.
 */
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    /* system segments are never writable by VERW */
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments are never writable */
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
2511
#if defined(CONFIG_USER_ONLY)
/*
 * user-mode segment register load: in protected (non-vm86) mode the
 * full descriptor checks of helper_load_seg apply; in real or vm86
 * mode the segment cache is synthesized directly from the selector.
 */
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if ((env->cr[0] & CR0_PE_MASK) && !(env->eflags & VM_MASK)) {
        helper_load_seg(env, seg_reg, selector);
        return;
    }

    /* vm86 segments are DPL 3; real-mode ones DPL 0 */
    int dpl = (env->eflags & VM_MASK) ? 3 : 0;
    uint32_t sel = selector & 0xffff;

    cpu_x86_load_seg_cache(env, seg_reg, sel,
                           (sel << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
}
#endif
81cf8d8a
PB
2527
2528/* check if Port I/O is allowed in TSS */
100ec099
PD
2529static inline void check_io(CPUX86State *env, int addr, int size,
2530 uintptr_t retaddr)
81cf8d8a
PB
2531{
2532 int io_offset, val, mask;
2533
2534 /* TSS must be a valid 32 bit one */
2535 if (!(env->tr.flags & DESC_P_MASK) ||
2536 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2537 env->tr.limit < 103) {
2538 goto fail;
2539 }
100ec099 2540 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
81cf8d8a
PB
2541 io_offset += (addr >> 3);
2542 /* Note: the check needs two bytes */
2543 if ((io_offset + 1) > env->tr.limit) {
2544 goto fail;
2545 }
100ec099 2546 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
81cf8d8a
PB
2547 val >>= (addr & 7);
2548 mask = (1 << size) - 1;
2549 /* all bits must be zero to allow the I/O */
2550 if ((val & mask) != 0) {
2551 fail:
100ec099 2552 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
81cf8d8a
PB
2553 }
2554}
2555
/* I/O permission check for a 1-byte access at port t0 */
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}
2560
/* I/O permission check for a 2-byte access at port t0 */
void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}
2565
/* I/O permission check for a 4-byte access at port t0 */
void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());
}