/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif

#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif

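/*
 * The template includes above expand, for DATA_SIZE 1/2/4/8, into the
 * cpu_ldub/lduw/ldl/ldq_kernel() and cpu_stb/stw/stl/stq_kernel() accessors
 * used throughout this file: on system emulation they go through the
 * kernel-privilege MMU index, on user-only emulation they are plain
 * loads and stores.
 */
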
/* return non-zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}

static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

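/*
 * e1/e2 are the two 32-bit words of a segment descriptor: e1 holds
 * limit[15:0] and base[15:0]; e2 holds base[23:16], the type/S/DPL/P
 * access byte, limit[19:16], the AVL/L/D-B/G flags and base[31:24].
 * get_seg_base() and get_seg_limit() above simply reassemble those fields.
 */
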
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}

static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}

static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

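/*
 * switch_tss() below hard-codes the architectural TSS layouts it reads and
 * writes: in a 32-bit TSS, CR3 is at 0x1c, EIP at 0x20, EFLAGS at 0x24, the
 * eight GP registers start at 0x28, the six segment selectors at 0x48
 * (4 bytes apart), the LDT selector is at 0x60 and the T bit/I-O map base
 * word at 0x64.  In a 16-bit TSS, IP is at 0x0e, FLAGS at 0x10, the
 * registers start at 0x12 and the LDT selector is at 0x2a.
 */
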
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}

static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

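/*
 * Only vectors 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP), 14 (#PF)
 * and 17 (#AC) push an error code; that architectural list is what
 * exception_has_error_code() encodes.
 */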
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                             \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));    \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                      \
    {                                                                     \
        sp -= 4;                                                          \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                              \
    {                                                            \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));    \
        sp += 2;                                                 \
    }

#define POPL(ssp, sp, sp_mask, val)                                      \
    {                                                                    \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                         \
    }

2999a0b2
BS
577static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
578 int error_code, unsigned int next_eip,
579 int is_hw)
eaa728ee
FB
580{
581 SegmentCache *dt;
582 target_ulong ptr, ssp;
583 int type, dpl, selector, ss_dpl, cpl;
584 int has_error_code, new_stack, shift;
1c918eba 585 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
eaa728ee 586 uint32_t old_eip, sp_mask;
87446327 587 int vm86 = env->eflags & VM_MASK;
eaa728ee 588
eaa728ee 589 has_error_code = 0;
20054ef0
BS
590 if (!is_int && !is_hw) {
591 has_error_code = exception_has_error_code(intno);
592 }
593 if (is_int) {
eaa728ee 594 old_eip = next_eip;
20054ef0 595 } else {
eaa728ee 596 old_eip = env->eip;
20054ef0 597 }
eaa728ee
FB
598
599 dt = &env->idt;
20054ef0 600 if (intno * 8 + 7 > dt->limit) {
77b2bc2c 601 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 602 }
eaa728ee 603 ptr = dt->base + intno * 8;
329e607d
BS
604 e1 = cpu_ldl_kernel(env, ptr);
605 e2 = cpu_ldl_kernel(env, ptr + 4);
eaa728ee
FB
606 /* check gate type */
607 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
20054ef0 608 switch (type) {
eaa728ee
FB
609 case 5: /* task gate */
610 /* must do that check here to return the correct error code */
20054ef0 611 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 612 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
20054ef0 613 }
2999a0b2 614 switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
eaa728ee
FB
615 if (has_error_code) {
616 int type;
617 uint32_t mask;
20054ef0 618
eaa728ee
FB
619 /* push the error code */
620 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
621 shift = type >> 3;
20054ef0 622 if (env->segs[R_SS].flags & DESC_B_MASK) {
eaa728ee 623 mask = 0xffffffff;
20054ef0 624 } else {
eaa728ee 625 mask = 0xffff;
20054ef0 626 }
08b3ded6 627 esp = (env->regs[R_ESP] - (2 << shift)) & mask;
eaa728ee 628 ssp = env->segs[R_SS].base + esp;
20054ef0 629 if (shift) {
329e607d 630 cpu_stl_kernel(env, ssp, error_code);
20054ef0 631 } else {
329e607d 632 cpu_stw_kernel(env, ssp, error_code);
20054ef0 633 }
eaa728ee
FB
634 SET_ESP(esp, mask);
635 }
636 return;
637 case 6: /* 286 interrupt gate */
638 case 7: /* 286 trap gate */
639 case 14: /* 386 interrupt gate */
640 case 15: /* 386 trap gate */
641 break;
642 default:
77b2bc2c 643 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
eaa728ee
FB
644 break;
645 }
646 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
647 cpl = env->hflags & HF_CPL_MASK;
1235fc06 648 /* check privilege if software int */
20054ef0 649 if (is_int && dpl < cpl) {
77b2bc2c 650 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 651 }
eaa728ee 652 /* check valid bit */
20054ef0 653 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 654 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
20054ef0 655 }
eaa728ee
FB
656 selector = e1 >> 16;
657 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
20054ef0 658 if ((selector & 0xfffc) == 0) {
77b2bc2c 659 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 660 }
2999a0b2 661 if (load_segment(env, &e1, &e2, selector) != 0) {
77b2bc2c 662 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
663 }
664 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 665 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 666 }
eaa728ee 667 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 668 if (dpl > cpl) {
77b2bc2c 669 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
670 }
671 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 672 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 673 }
eaa728ee
FB
674 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
675 /* to inner privilege */
2999a0b2 676 get_ss_esp_from_tss(env, &ss, &esp, dpl);
20054ef0 677 if ((ss & 0xfffc) == 0) {
77b2bc2c 678 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
679 }
680 if ((ss & 3) != dpl) {
77b2bc2c 681 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 682 }
2999a0b2 683 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
77b2bc2c 684 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 685 }
eaa728ee 686 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 687 if (ss_dpl != dpl) {
77b2bc2c 688 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 689 }
eaa728ee
FB
690 if (!(ss_e2 & DESC_S_MASK) ||
691 (ss_e2 & DESC_CS_MASK) ||
20054ef0 692 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 693 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
694 }
695 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 696 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 697 }
eaa728ee
FB
698 new_stack = 1;
699 sp_mask = get_sp_mask(ss_e2);
700 ssp = get_seg_base(ss_e1, ss_e2);
701 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
702 /* to same privilege */
87446327 703 if (vm86) {
77b2bc2c 704 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 705 }
eaa728ee
FB
706 new_stack = 0;
707 sp_mask = get_sp_mask(env->segs[R_SS].flags);
708 ssp = env->segs[R_SS].base;
08b3ded6 709 esp = env->regs[R_ESP];
eaa728ee
FB
710 dpl = cpl;
711 } else {
77b2bc2c 712 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
eaa728ee
FB
713 new_stack = 0; /* avoid warning */
714 sp_mask = 0; /* avoid warning */
715 ssp = 0; /* avoid warning */
716 esp = 0; /* avoid warning */
717 }
718
719 shift = type >> 3;
720
721#if 0
722 /* XXX: check that enough room is available */
723 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
87446327 724 if (vm86) {
eaa728ee 725 push_size += 8;
20054ef0 726 }
eaa728ee
FB
727 push_size <<= shift;
728#endif
729 if (shift == 1) {
730 if (new_stack) {
87446327 731 if (vm86) {
eaa728ee
FB
732 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
733 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
734 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
735 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
736 }
737 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
08b3ded6 738 PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
eaa728ee 739 }
997ff0d9 740 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
eaa728ee
FB
741 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
742 PUSHL(ssp, esp, sp_mask, old_eip);
743 if (has_error_code) {
744 PUSHL(ssp, esp, sp_mask, error_code);
745 }
746 } else {
747 if (new_stack) {
87446327 748 if (vm86) {
eaa728ee
FB
749 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
750 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
751 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
752 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
753 }
754 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
08b3ded6 755 PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
eaa728ee 756 }
997ff0d9 757 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
eaa728ee
FB
758 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
759 PUSHW(ssp, esp, sp_mask, old_eip);
760 if (has_error_code) {
761 PUSHW(ssp, esp, sp_mask, error_code);
762 }
763 }
764
fd460606
KC
765 /* interrupt gate clear IF mask */
766 if ((type & 1) == 0) {
767 env->eflags &= ~IF_MASK;
768 }
769 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
770
eaa728ee 771 if (new_stack) {
87446327 772 if (vm86) {
eaa728ee
FB
773 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
774 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
775 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
776 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
777 }
778 ss = (ss & ~3) | dpl;
779 cpu_x86_load_seg_cache(env, R_SS, ss,
780 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
781 }
782 SET_ESP(esp, sp_mask);
783
784 selector = (selector & ~3) | dpl;
785 cpu_x86_load_seg_cache(env, R_CS, selector,
786 get_seg_base(e1, e2),
787 get_seg_limit(e1, e2),
788 e2);
eaa728ee 789 env->eip = offset;
eaa728ee
FB
790}
791
792#ifdef TARGET_X86_64
793
20054ef0
BS
794#define PUSHQ(sp, val) \
795 { \
796 sp -= 8; \
329e607d 797 cpu_stq_kernel(env, sp, (val)); \
20054ef0 798 }
eaa728ee 799
20054ef0
BS
800#define POPQ(sp, val) \
801 { \
329e607d 802 val = cpu_ldq_kernel(env, sp); \
20054ef0
BS
803 sp += 8; \
804 }
eaa728ee 805
2999a0b2 806static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
eaa728ee 807{
a47dddd7 808 X86CPU *cpu = x86_env_get_cpu(env);
eaa728ee
FB
809 int index;
810
811#if 0
812 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
813 env->tr.base, env->tr.limit);
814#endif
815
20054ef0 816 if (!(env->tr.flags & DESC_P_MASK)) {
a47dddd7 817 cpu_abort(CPU(cpu), "invalid tss");
20054ef0 818 }
eaa728ee 819 index = 8 * level + 4;
20054ef0 820 if ((index + 7) > env->tr.limit) {
77b2bc2c 821 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
20054ef0 822 }
329e607d 823 return cpu_ldq_kernel(env, env->tr.base + index);
eaa728ee
FB
824}
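/*
 * In the 64-bit TSS, RSP0/1/2 live at offsets 4/12/20 and IST1-7 start at
 * offset 36, so "8 * level + 4" above covers both cases: levels 0-2 select
 * RSPn on a privilege change, levels 4-10 (ist + 3) select an IST stack.
 */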
825
826/* 64 bit interrupt */
2999a0b2
BS
827static void do_interrupt64(CPUX86State *env, int intno, int is_int,
828 int error_code, target_ulong next_eip, int is_hw)
eaa728ee
FB
829{
830 SegmentCache *dt;
831 target_ulong ptr;
832 int type, dpl, selector, cpl, ist;
833 int has_error_code, new_stack;
834 uint32_t e1, e2, e3, ss;
835 target_ulong old_eip, esp, offset;
eaa728ee 836
eaa728ee 837 has_error_code = 0;
20054ef0
BS
838 if (!is_int && !is_hw) {
839 has_error_code = exception_has_error_code(intno);
840 }
841 if (is_int) {
eaa728ee 842 old_eip = next_eip;
20054ef0 843 } else {
eaa728ee 844 old_eip = env->eip;
20054ef0 845 }
eaa728ee
FB
846
847 dt = &env->idt;
20054ef0 848 if (intno * 16 + 15 > dt->limit) {
77b2bc2c 849 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
20054ef0 850 }
eaa728ee 851 ptr = dt->base + intno * 16;
329e607d
BS
852 e1 = cpu_ldl_kernel(env, ptr);
853 e2 = cpu_ldl_kernel(env, ptr + 4);
854 e3 = cpu_ldl_kernel(env, ptr + 8);
eaa728ee
FB
855 /* check gate type */
856 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
20054ef0 857 switch (type) {
eaa728ee
FB
858 case 14: /* 386 interrupt gate */
859 case 15: /* 386 trap gate */
860 break;
861 default:
77b2bc2c 862 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
eaa728ee
FB
863 break;
864 }
865 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
866 cpl = env->hflags & HF_CPL_MASK;
1235fc06 867 /* check privilege if software int */
20054ef0 868 if (is_int && dpl < cpl) {
77b2bc2c 869 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
20054ef0 870 }
eaa728ee 871 /* check valid bit */
20054ef0 872 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 873 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
20054ef0 874 }
eaa728ee
FB
875 selector = e1 >> 16;
876 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
877 ist = e2 & 7;
20054ef0 878 if ((selector & 0xfffc) == 0) {
77b2bc2c 879 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 880 }
eaa728ee 881
2999a0b2 882 if (load_segment(env, &e1, &e2, selector) != 0) {
77b2bc2c 883 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
884 }
885 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 886 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 887 }
eaa728ee 888 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 889 if (dpl > cpl) {
77b2bc2c 890 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
891 }
892 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 893 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0
BS
894 }
895 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
77b2bc2c 896 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 897 }
eaa728ee
FB
898 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
899 /* to inner privilege */
eaa728ee 900 new_stack = 1;
ae67dc72
PB
901 esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
902 ss = 0;
eaa728ee
FB
903 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
904 /* to same privilege */
20054ef0 905 if (env->eflags & VM_MASK) {
77b2bc2c 906 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 907 }
eaa728ee 908 new_stack = 0;
ae67dc72 909 esp = env->regs[R_ESP];
eaa728ee
FB
910 dpl = cpl;
911 } else {
77b2bc2c 912 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
eaa728ee
FB
913 new_stack = 0; /* avoid warning */
914 esp = 0; /* avoid warning */
915 }
ae67dc72 916 esp &= ~0xfLL; /* align stack */
eaa728ee
FB
917
918 PUSHQ(esp, env->segs[R_SS].selector);
08b3ded6 919 PUSHQ(esp, env->regs[R_ESP]);
997ff0d9 920 PUSHQ(esp, cpu_compute_eflags(env));
eaa728ee
FB
921 PUSHQ(esp, env->segs[R_CS].selector);
922 PUSHQ(esp, old_eip);
923 if (has_error_code) {
924 PUSHQ(esp, error_code);
925 }
926
fd460606
KC
927 /* interrupt gate clear IF mask */
928 if ((type & 1) == 0) {
929 env->eflags &= ~IF_MASK;
930 }
931 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
932
eaa728ee
FB
933 if (new_stack) {
934 ss = 0 | dpl;
935 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
936 }
08b3ded6 937 env->regs[R_ESP] = esp;
eaa728ee
FB
938
939 selector = (selector & ~3) | dpl;
940 cpu_x86_load_seg_cache(env, R_CS, selector,
941 get_seg_base(e1, e2),
942 get_seg_limit(e1, e2),
943 e2);
eaa728ee 944 env->eip = offset;
eaa728ee
FB
945}
946#endif
947
d9957a8b 948#ifdef TARGET_X86_64
eaa728ee 949#if defined(CONFIG_USER_ONLY)
2999a0b2 950void helper_syscall(CPUX86State *env, int next_eip_addend)
eaa728ee 951{
27103424
AF
952 CPUState *cs = CPU(x86_env_get_cpu(env));
953
954 cs->exception_index = EXCP_SYSCALL;
eaa728ee 955 env->exception_next_eip = env->eip + next_eip_addend;
5638d180 956 cpu_loop_exit(cs);
eaa728ee
FB
957}
958#else
2999a0b2 959void helper_syscall(CPUX86State *env, int next_eip_addend)
eaa728ee
FB
960{
961 int selector;
962
963 if (!(env->efer & MSR_EFER_SCE)) {
77b2bc2c 964 raise_exception_err(env, EXCP06_ILLOP, 0);
eaa728ee
FB
965 }
966 selector = (env->star >> 32) & 0xffff;
eaa728ee
FB
967 if (env->hflags & HF_LMA_MASK) {
968 int code64;
969
a4165610 970 env->regs[R_ECX] = env->eip + next_eip_addend;
997ff0d9 971 env->regs[11] = cpu_compute_eflags(env);
eaa728ee
FB
972
973 code64 = env->hflags & HF_CS64_MASK;
974
fd460606
KC
975 env->eflags &= ~env->fmask;
976 cpu_load_eflags(env, env->eflags, 0);
eaa728ee
FB
977 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
978 0, 0xffffffff,
979 DESC_G_MASK | DESC_P_MASK |
980 DESC_S_MASK |
20054ef0
BS
981 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
982 DESC_L_MASK);
eaa728ee
FB
983 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
984 0, 0xffffffff,
985 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
986 DESC_S_MASK |
987 DESC_W_MASK | DESC_A_MASK);
20054ef0 988 if (code64) {
eaa728ee 989 env->eip = env->lstar;
20054ef0 990 } else {
eaa728ee 991 env->eip = env->cstar;
20054ef0 992 }
d9957a8b 993 } else {
a4165610 994 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
eaa728ee 995
fd460606 996 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
eaa728ee
FB
997 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
998 0, 0xffffffff,
999 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1000 DESC_S_MASK |
1001 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1002 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1003 0, 0xffffffff,
1004 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1005 DESC_S_MASK |
1006 DESC_W_MASK | DESC_A_MASK);
eaa728ee
FB
1007 env->eip = (uint32_t)env->star;
1008 }
1009}
1010#endif
d9957a8b 1011#endif
eaa728ee 1012
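/*
 * In long mode, helper_syscall() above follows the SYSCALL MSR conventions:
 * CS is loaded from STAR[47:32] (SS is that selector + 8), the target RIP
 * comes from LSTAR for 64-bit callers or CSTAR for compatibility mode, and
 * RFLAGS is masked with the flag-mask MSR (env->fmask).  helper_sysret()
 * below reloads CS/SS from STAR[63:48], using +16 for a 64-bit return.
 */
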
d9957a8b 1013#ifdef TARGET_X86_64
2999a0b2 1014void helper_sysret(CPUX86State *env, int dflag)
eaa728ee
FB
1015{
1016 int cpl, selector;
1017
1018 if (!(env->efer & MSR_EFER_SCE)) {
77b2bc2c 1019 raise_exception_err(env, EXCP06_ILLOP, 0);
eaa728ee
FB
1020 }
1021 cpl = env->hflags & HF_CPL_MASK;
1022 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
77b2bc2c 1023 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee
FB
1024 }
1025 selector = (env->star >> 48) & 0xffff;
eaa728ee 1026 if (env->hflags & HF_LMA_MASK) {
fd460606
KC
1027 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1028 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1029 NT_MASK);
eaa728ee
FB
1030 if (dflag == 2) {
1031 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1032 0, 0xffffffff,
1033 DESC_G_MASK | DESC_P_MASK |
1034 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1035 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1036 DESC_L_MASK);
a4165610 1037 env->eip = env->regs[R_ECX];
eaa728ee
FB
1038 } else {
1039 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1040 0, 0xffffffff,
1041 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1042 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1043 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
a4165610 1044 env->eip = (uint32_t)env->regs[R_ECX];
eaa728ee 1045 }
ac576229 1046 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
eaa728ee
FB
1047 0, 0xffffffff,
1048 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1049 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1050 DESC_W_MASK | DESC_A_MASK);
d9957a8b 1051 } else {
fd460606 1052 env->eflags |= IF_MASK;
eaa728ee
FB
1053 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1054 0, 0xffffffff,
1055 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1056 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1057 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
a4165610 1058 env->eip = (uint32_t)env->regs[R_ECX];
ac576229 1059 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
eaa728ee
FB
1060 0, 0xffffffff,
1061 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1062 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1063 DESC_W_MASK | DESC_A_MASK);
eaa728ee 1064 }
eaa728ee 1065}
d9957a8b 1066#endif
eaa728ee
FB
1067
1068/* real mode interrupt */
2999a0b2
BS
1069static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1070 int error_code, unsigned int next_eip)
eaa728ee
FB
1071{
1072 SegmentCache *dt;
1073 target_ulong ptr, ssp;
1074 int selector;
1075 uint32_t offset, esp;
1076 uint32_t old_cs, old_eip;
eaa728ee 1077
20054ef0 1078 /* real mode (simpler!) */
eaa728ee 1079 dt = &env->idt;
20054ef0 1080 if (intno * 4 + 3 > dt->limit) {
77b2bc2c 1081 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 1082 }
eaa728ee 1083 ptr = dt->base + intno * 4;
329e607d
BS
1084 offset = cpu_lduw_kernel(env, ptr);
1085 selector = cpu_lduw_kernel(env, ptr + 2);
08b3ded6 1086 esp = env->regs[R_ESP];
eaa728ee 1087 ssp = env->segs[R_SS].base;
20054ef0 1088 if (is_int) {
eaa728ee 1089 old_eip = next_eip;
20054ef0 1090 } else {
eaa728ee 1091 old_eip = env->eip;
20054ef0 1092 }
eaa728ee 1093 old_cs = env->segs[R_CS].selector;
20054ef0 1094 /* XXX: use SS segment size? */
997ff0d9 1095 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
eaa728ee
FB
1096 PUSHW(ssp, esp, 0xffff, old_cs);
1097 PUSHW(ssp, esp, 0xffff, old_eip);
1098
1099 /* update processor state */
08b3ded6 1100 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
eaa728ee
FB
1101 env->eip = offset;
1102 env->segs[R_CS].selector = selector;
1103 env->segs[R_CS].base = (selector << 4);
1104 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1105}
1106
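/*
 * Note that do_interrupt_real() above uses the real-mode IVT format: each
 * vector is a 4-byte offset:segment pair at IDT base + intno * 4, and only
 * FLAGS, CS and IP are pushed on the 16-bit stack.
 */
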
e694d4e2 1107#if defined(CONFIG_USER_ONLY)
eaa728ee 1108/* fake user mode interrupt */
2999a0b2
BS
1109static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
1110 int error_code, target_ulong next_eip)
eaa728ee
FB
1111{
1112 SegmentCache *dt;
1113 target_ulong ptr;
1114 int dpl, cpl, shift;
1115 uint32_t e2;
1116
1117 dt = &env->idt;
1118 if (env->hflags & HF_LMA_MASK) {
1119 shift = 4;
1120 } else {
1121 shift = 3;
1122 }
1123 ptr = dt->base + (intno << shift);
329e607d 1124 e2 = cpu_ldl_kernel(env, ptr + 4);
eaa728ee
FB
1125
1126 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1127 cpl = env->hflags & HF_CPL_MASK;
1235fc06 1128 /* check privilege if software int */
20054ef0 1129 if (is_int && dpl < cpl) {
77b2bc2c 1130 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
20054ef0 1131 }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
eaa728ee
FB
1139}
1140
e694d4e2
BS
1141#else
1142
2999a0b2
BS
1143static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1144 int error_code, int is_hw, int rm)
2ed51f5b 1145{
19d6ca16 1146 CPUState *cs = CPU(x86_env_get_cpu(env));
b216aa6c 1147 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
20054ef0
BS
1148 control.event_inj));
1149
2ed51f5b 1150 if (!(event_inj & SVM_EVTINJ_VALID)) {
20054ef0
BS
1151 int type;
1152
1153 if (is_int) {
1154 type = SVM_EVTINJ_TYPE_SOFT;
1155 } else {
1156 type = SVM_EVTINJ_TYPE_EXEPT;
1157 }
1158 event_inj = intno | type | SVM_EVTINJ_VALID;
1159 if (!rm && exception_has_error_code(intno)) {
1160 event_inj |= SVM_EVTINJ_VALID_ERR;
b216aa6c 1161 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
20054ef0
BS
1162 control.event_inj_err),
1163 error_code);
1164 }
b216aa6c 1165 x86_stl_phys(cs,
ab1da857 1166 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
20054ef0 1167 event_inj);
2ed51f5b
AL
1168 }
1169}
00ea18d1 1170#endif
2ed51f5b 1171
/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
eaa728ee 1179{
ca4c810a
AF
1180 CPUX86State *env = &cpu->env;
1181
8fec2b8c 1182 if (qemu_loglevel_mask(CPU_LOG_INT)) {
eaa728ee
FB
1183 if ((env->cr[0] & CR0_PE_MASK)) {
1184 static int count;
20054ef0
BS
1185
1186 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1187 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1188 count, intno, error_code, is_int,
1189 env->hflags & HF_CPL_MASK,
a78d0eab
LG
1190 env->segs[R_CS].selector, env->eip,
1191 (int)env->segs[R_CS].base + env->eip,
08b3ded6 1192 env->segs[R_SS].selector, env->regs[R_ESP]);
eaa728ee 1193 if (intno == 0x0e) {
93fcfe39 1194 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
eaa728ee 1195 } else {
4b34e3ad 1196 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
eaa728ee 1197 }
93fcfe39 1198 qemu_log("\n");
a0762859 1199 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
eaa728ee
FB
1200#if 0
1201 {
1202 int i;
9bd5494e 1203 target_ulong ptr;
20054ef0 1204
93fcfe39 1205 qemu_log(" code=");
eaa728ee 1206 ptr = env->segs[R_CS].base + env->eip;
20054ef0 1207 for (i = 0; i < 16; i++) {
93fcfe39 1208 qemu_log(" %02x", ldub(ptr + i));
eaa728ee 1209 }
93fcfe39 1210 qemu_log("\n");
eaa728ee
FB
1211 }
1212#endif
1213 count++;
1214 }
1215 }
1216 if (env->cr[0] & CR0_PE_MASK) {
00ea18d1 1217#if !defined(CONFIG_USER_ONLY)
20054ef0 1218 if (env->hflags & HF_SVMI_MASK) {
2999a0b2 1219 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
20054ef0 1220 }
00ea18d1 1221#endif
eb38c52c 1222#ifdef TARGET_X86_64
eaa728ee 1223 if (env->hflags & HF_LMA_MASK) {
2999a0b2 1224 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
eaa728ee
FB
1225 } else
1226#endif
1227 {
2999a0b2
BS
1228 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1229 is_hw);
eaa728ee
FB
1230 }
1231 } else {
00ea18d1 1232#if !defined(CONFIG_USER_ONLY)
20054ef0 1233 if (env->hflags & HF_SVMI_MASK) {
2999a0b2 1234 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
20054ef0 1235 }
00ea18d1 1236#endif
2999a0b2 1237 do_interrupt_real(env, intno, is_int, error_code, next_eip);
eaa728ee 1238 }
2ed51f5b 1239
00ea18d1 1240#if !defined(CONFIG_USER_ONLY)
2ed51f5b 1241 if (env->hflags & HF_SVMI_MASK) {
fdfba1a2 1242 CPUState *cs = CPU(cpu);
b216aa6c 1243 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
20054ef0
BS
1244 offsetof(struct vmcb,
1245 control.event_inj));
1246
b216aa6c 1247 x86_stl_phys(cs,
ab1da857 1248 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
20054ef0 1249 event_inj & ~SVM_EVTINJ_VALID);
2ed51f5b 1250 }
00ea18d1 1251#endif
eaa728ee
FB
1252}
1253
97a8ea5a 1254void x86_cpu_do_interrupt(CPUState *cs)
e694d4e2 1255{
97a8ea5a
AF
1256 X86CPU *cpu = X86_CPU(cs);
1257 CPUX86State *env = &cpu->env;
1258
e694d4e2
BS
1259#if defined(CONFIG_USER_ONLY)
1260 /* if user mode only, we simulate a fake exception
1261 which will be handled outside the cpu execution
1262 loop */
27103424 1263 do_interrupt_user(env, cs->exception_index,
e694d4e2
BS
1264 env->exception_is_int,
1265 env->error_code,
1266 env->exception_next_eip);
1267 /* successfully delivered */
1268 env->old_exception = -1;
1269#else
1270 /* simulate a real cpu exception. On i386, it can
1271 trigger new exceptions, but we do not handle
1272 double or triple faults yet. */
27103424 1273 do_interrupt_all(cpu, cs->exception_index,
e694d4e2
BS
1274 env->exception_is_int,
1275 env->error_code,
1276 env->exception_next_eip, 0);
1277 /* successfully delivered */
1278 env->old_exception = -1;
1279#endif
e694d4e2
BS
1280}
1281
2999a0b2 1282void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
e694d4e2 1283{
ca4c810a 1284 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
e694d4e2
BS
1285}
1286
42f53fea
RH
1287bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
1288{
1289 X86CPU *cpu = X86_CPU(cs);
1290 CPUX86State *env = &cpu->env;
1291 bool ret = false;
1292
1293#if !defined(CONFIG_USER_ONLY)
1294 if (interrupt_request & CPU_INTERRUPT_POLL) {
1295 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
1296 apic_poll_irq(cpu->apic_state);
1297 }
1298#endif
1299 if (interrupt_request & CPU_INTERRUPT_SIPI) {
1300 do_cpu_sipi(cpu);
1301 } else if (env->hflags2 & HF2_GIF_MASK) {
1302 if ((interrupt_request & CPU_INTERRUPT_SMI) &&
1303 !(env->hflags & HF_SMM_MASK)) {
1304 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
1305 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
1306 do_smm_enter(cpu);
1307 ret = true;
1308 } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
1309 !(env->hflags2 & HF2_NMI_MASK)) {
1310 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
1311 env->hflags2 |= HF2_NMI_MASK;
1312 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
1313 ret = true;
1314 } else if (interrupt_request & CPU_INTERRUPT_MCE) {
1315 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
1316 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
1317 ret = true;
1318 } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
1319 (((env->hflags2 & HF2_VINTR_MASK) &&
1320 (env->hflags2 & HF2_HIF_MASK)) ||
1321 (!(env->hflags2 & HF2_VINTR_MASK) &&
1322 (env->eflags & IF_MASK &&
1323 !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
1324 int intno;
1325 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
1326 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
1327 CPU_INTERRUPT_VIRQ);
1328 intno = cpu_get_pic_interrupt(env);
1329 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1330 "Servicing hardware INT=0x%02x\n", intno);
1331 do_interrupt_x86_hardirq(env, intno, 1);
1332 /* ensure that no TB jump will be modified as
1333 the program flow was changed */
1334 ret = true;
1335#if !defined(CONFIG_USER_ONLY)
1336 } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
1337 (env->eflags & IF_MASK) &&
1338 !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
1339 int intno;
1340 /* FIXME: this should respect TPR */
1341 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
b216aa6c 1342 intno = x86_ldl_phys(cs, env->vm_vmcb
42f53fea
RH
1343 + offsetof(struct vmcb, control.int_vector));
1344 qemu_log_mask(CPU_LOG_TB_IN_ASM,
1345 "Servicing virtual hardware INT=0x%02x\n", intno);
1346 do_interrupt_x86_hardirq(env, intno, 1);
1347 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
1348 ret = true;
1349#endif
1350 }
1351 }
1352
1353 return ret;
1354}
1355
2999a0b2
BS
1356void helper_enter_level(CPUX86State *env, int level, int data32,
1357 target_ulong t1)
eaa728ee
FB
1358{
1359 target_ulong ssp;
1360 uint32_t esp_mask, esp, ebp;
1361
1362 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1363 ssp = env->segs[R_SS].base;
c12dddd7 1364 ebp = env->regs[R_EBP];
08b3ded6 1365 esp = env->regs[R_ESP];
eaa728ee
FB
1366 if (data32) {
1367 /* 32 bit */
1368 esp -= 4;
1369 while (--level) {
1370 esp -= 4;
1371 ebp -= 4;
329e607d
BS
1372 cpu_stl_data(env, ssp + (esp & esp_mask),
1373 cpu_ldl_data(env, ssp + (ebp & esp_mask)));
eaa728ee
FB
1374 }
1375 esp -= 4;
329e607d 1376 cpu_stl_data(env, ssp + (esp & esp_mask), t1);
eaa728ee
FB
1377 } else {
1378 /* 16 bit */
1379 esp -= 2;
1380 while (--level) {
1381 esp -= 2;
1382 ebp -= 2;
329e607d
BS
1383 cpu_stw_data(env, ssp + (esp & esp_mask),
1384 cpu_lduw_data(env, ssp + (ebp & esp_mask)));
eaa728ee
FB
1385 }
1386 esp -= 2;
329e607d 1387 cpu_stw_data(env, ssp + (esp & esp_mask), t1);
eaa728ee
FB
1388 }
1389}
1390
1391#ifdef TARGET_X86_64
2999a0b2
BS
1392void helper_enter64_level(CPUX86State *env, int level, int data64,
1393 target_ulong t1)
eaa728ee
FB
1394{
1395 target_ulong esp, ebp;
20054ef0 1396
c12dddd7 1397 ebp = env->regs[R_EBP];
08b3ded6 1398 esp = env->regs[R_ESP];
eaa728ee
FB
1399
1400 if (data64) {
1401 /* 64 bit */
1402 esp -= 8;
1403 while (--level) {
1404 esp -= 8;
1405 ebp -= 8;
329e607d 1406 cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
eaa728ee
FB
1407 }
1408 esp -= 8;
329e607d 1409 cpu_stq_data(env, esp, t1);
eaa728ee
FB
1410 } else {
1411 /* 16 bit */
1412 esp -= 2;
1413 while (--level) {
1414 esp -= 2;
1415 ebp -= 2;
329e607d 1416 cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
eaa728ee
FB
1417 }
1418 esp -= 2;
329e607d 1419 cpu_stw_data(env, esp, t1);
eaa728ee
FB
1420 }
1421}
1422#endif
1423
2999a0b2 1424void helper_lldt(CPUX86State *env, int selector)
eaa728ee
FB
1425{
1426 SegmentCache *dt;
1427 uint32_t e1, e2;
1428 int index, entry_limit;
1429 target_ulong ptr;
1430
1431 selector &= 0xffff;
1432 if ((selector & 0xfffc) == 0) {
1433 /* XXX: NULL selector case: invalid LDT */
1434 env->ldt.base = 0;
1435 env->ldt.limit = 0;
1436 } else {
20054ef0 1437 if (selector & 0x4) {
77b2bc2c 1438 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1439 }
eaa728ee
FB
1440 dt = &env->gdt;
1441 index = selector & ~7;
1442#ifdef TARGET_X86_64
20054ef0 1443 if (env->hflags & HF_LMA_MASK) {
eaa728ee 1444 entry_limit = 15;
20054ef0 1445 } else
eaa728ee 1446#endif
20054ef0 1447 {
eaa728ee 1448 entry_limit = 7;
20054ef0
BS
1449 }
1450 if ((index + entry_limit) > dt->limit) {
77b2bc2c 1451 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1452 }
eaa728ee 1453 ptr = dt->base + index;
329e607d
BS
1454 e1 = cpu_ldl_kernel(env, ptr);
1455 e2 = cpu_ldl_kernel(env, ptr + 4);
20054ef0 1456 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
77b2bc2c 1457 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
1458 }
1459 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1460 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 1461 }
eaa728ee
FB
1462#ifdef TARGET_X86_64
1463 if (env->hflags & HF_LMA_MASK) {
1464 uint32_t e3;
20054ef0 1465
329e607d 1466 e3 = cpu_ldl_kernel(env, ptr + 8);
eaa728ee
FB
1467 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1468 env->ldt.base |= (target_ulong)e3 << 32;
1469 } else
1470#endif
1471 {
1472 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1473 }
1474 }
1475 env->ldt.selector = selector;
1476}
1477
2999a0b2 1478void helper_ltr(CPUX86State *env, int selector)
eaa728ee
FB
1479{
1480 SegmentCache *dt;
1481 uint32_t e1, e2;
1482 int index, type, entry_limit;
1483 target_ulong ptr;
1484
1485 selector &= 0xffff;
1486 if ((selector & 0xfffc) == 0) {
1487 /* NULL selector case: invalid TR */
1488 env->tr.base = 0;
1489 env->tr.limit = 0;
1490 env->tr.flags = 0;
1491 } else {
20054ef0 1492 if (selector & 0x4) {
77b2bc2c 1493 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1494 }
eaa728ee
FB
1495 dt = &env->gdt;
1496 index = selector & ~7;
1497#ifdef TARGET_X86_64
20054ef0 1498 if (env->hflags & HF_LMA_MASK) {
eaa728ee 1499 entry_limit = 15;
20054ef0 1500 } else
eaa728ee 1501#endif
20054ef0 1502 {
eaa728ee 1503 entry_limit = 7;
20054ef0
BS
1504 }
1505 if ((index + entry_limit) > dt->limit) {
77b2bc2c 1506 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1507 }
eaa728ee 1508 ptr = dt->base + index;
329e607d
BS
1509 e1 = cpu_ldl_kernel(env, ptr);
1510 e2 = cpu_ldl_kernel(env, ptr + 4);
eaa728ee
FB
1511 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1512 if ((e2 & DESC_S_MASK) ||
20054ef0 1513 (type != 1 && type != 9)) {
77b2bc2c 1514 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
1515 }
1516 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1517 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 1518 }
eaa728ee
FB
1519#ifdef TARGET_X86_64
1520 if (env->hflags & HF_LMA_MASK) {
1521 uint32_t e3, e4;
20054ef0 1522
329e607d
BS
1523 e3 = cpu_ldl_kernel(env, ptr + 8);
1524 e4 = cpu_ldl_kernel(env, ptr + 12);
20054ef0 1525 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
77b2bc2c 1526 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1527 }
eaa728ee
FB
1528 load_seg_cache_raw_dt(&env->tr, e1, e2);
1529 env->tr.base |= (target_ulong)e3 << 32;
1530 } else
1531#endif
1532 {
1533 load_seg_cache_raw_dt(&env->tr, e1, e2);
1534 }
1535 e2 |= DESC_TSS_BUSY_MASK;
329e607d 1536 cpu_stl_kernel(env, ptr + 4, e2);
eaa728ee
FB
1537 }
1538 env->tr.selector = selector;
1539}
1540
1541/* only works if protected mode and not VM86. seg_reg must be != R_CS */
2999a0b2 1542void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
eaa728ee
FB
1543{
1544 uint32_t e1, e2;
1545 int cpl, dpl, rpl;
1546 SegmentCache *dt;
1547 int index;
1548 target_ulong ptr;
1549
1550 selector &= 0xffff;
1551 cpl = env->hflags & HF_CPL_MASK;
1552 if ((selector & 0xfffc) == 0) {
1553 /* null selector case */
1554 if (seg_reg == R_SS
1555#ifdef TARGET_X86_64
1556 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1557#endif
20054ef0 1558 ) {
77b2bc2c 1559 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1560 }
eaa728ee
FB
1561 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1562 } else {
1563
20054ef0 1564 if (selector & 0x4) {
eaa728ee 1565 dt = &env->ldt;
20054ef0 1566 } else {
eaa728ee 1567 dt = &env->gdt;
20054ef0 1568 }
eaa728ee 1569 index = selector & ~7;
20054ef0 1570 if ((index + 7) > dt->limit) {
77b2bc2c 1571 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1572 }
eaa728ee 1573 ptr = dt->base + index;
329e607d
BS
1574 e1 = cpu_ldl_kernel(env, ptr);
1575 e2 = cpu_ldl_kernel(env, ptr + 4);
eaa728ee 1576
20054ef0 1577 if (!(e2 & DESC_S_MASK)) {
77b2bc2c 1578 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1579 }
eaa728ee
FB
1580 rpl = selector & 3;
1581 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1582 if (seg_reg == R_SS) {
1583 /* must be writable segment */
20054ef0 1584 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
77b2bc2c 1585 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
1586 }
1587 if (rpl != cpl || dpl != cpl) {
77b2bc2c 1588 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1589 }
eaa728ee
FB
1590 } else {
1591 /* must be readable segment */
20054ef0 1592 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
77b2bc2c 1593 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1594 }
eaa728ee
FB
1595
1596 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1597 /* if not conforming code, test rights */
20054ef0 1598 if (dpl < cpl || dpl < rpl) {
77b2bc2c 1599 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1600 }
eaa728ee
FB
1601 }
1602 }
1603
1604 if (!(e2 & DESC_P_MASK)) {
20054ef0 1605 if (seg_reg == R_SS) {
77b2bc2c 1606 raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
20054ef0 1607 } else {
77b2bc2c 1608 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 1609 }
eaa728ee
FB
1610 }
1611
1612 /* set the access bit if not already set */
1613 if (!(e2 & DESC_A_MASK)) {
1614 e2 |= DESC_A_MASK;
329e607d 1615 cpu_stl_kernel(env, ptr + 4, e2);
eaa728ee
FB
1616 }
1617
1618 cpu_x86_load_seg_cache(env, seg_reg, selector,
1619 get_seg_base(e1, e2),
1620 get_seg_limit(e1, e2),
1621 e2);
1622#if 0
93fcfe39 1623 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
eaa728ee
FB
1624 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1625#endif
1626 }
1627}
1628
1629/* protected mode jump */
2999a0b2 1630void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
eaa728ee
FB
1631 int next_eip_addend)
1632{
1633 int gate_cs, type;
1634 uint32_t e1, e2, cpl, dpl, rpl, limit;
1635 target_ulong next_eip;
1636
20054ef0 1637 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 1638 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1639 }
2999a0b2 1640 if (load_segment(env, &e1, &e2, new_cs) != 0) {
77b2bc2c 1641 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1642 }
eaa728ee
FB
1643 cpl = env->hflags & HF_CPL_MASK;
1644 if (e2 & DESC_S_MASK) {
20054ef0 1645 if (!(e2 & DESC_CS_MASK)) {
77b2bc2c 1646 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1647 }
eaa728ee
FB
1648 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1649 if (e2 & DESC_C_MASK) {
1650 /* conforming code segment */
20054ef0 1651 if (dpl > cpl) {
77b2bc2c 1652 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1653 }
eaa728ee
FB
1654 } else {
1655 /* non-conforming code segment */
1656 rpl = new_cs & 3;
20054ef0 1657 if (rpl > cpl) {
77b2bc2c 1658 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0
BS
1659 }
1660 if (dpl != cpl) {
77b2bc2c 1661 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1662 }
eaa728ee 1663 }
20054ef0 1664 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1665 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 1666 }
eaa728ee
FB
1667 limit = get_seg_limit(e1, e2);
1668 if (new_eip > limit &&
20054ef0 1669 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
77b2bc2c 1670 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1671 }
eaa728ee
FB
1672 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1673 get_seg_base(e1, e2), limit, e2);
a78d0eab 1674 env->eip = new_eip;
eaa728ee
FB
1675 } else {
1676 /* jump to call or task gate */
1677 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1678 rpl = new_cs & 3;
1679 cpl = env->hflags & HF_CPL_MASK;
1680 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 1681 switch (type) {
eaa728ee
FB
1682 case 1: /* 286 TSS */
1683 case 9: /* 386 TSS */
1684 case 5: /* task gate */
20054ef0 1685 if (dpl < cpl || dpl < rpl) {
77b2bc2c 1686 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1687 }
eaa728ee 1688 next_eip = env->eip + next_eip_addend;
2999a0b2 1689 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
eaa728ee
FB
1690 break;
1691 case 4: /* 286 call gate */
1692 case 12: /* 386 call gate */
20054ef0 1693 if ((dpl < cpl) || (dpl < rpl)) {
77b2bc2c 1694 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0
BS
1695 }
1696 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1697 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 1698 }
eaa728ee
FB
1699 gate_cs = e1 >> 16;
1700 new_eip = (e1 & 0xffff);
20054ef0 1701 if (type == 12) {
eaa728ee 1702 new_eip |= (e2 & 0xffff0000);
20054ef0 1703 }
2999a0b2 1704 if (load_segment(env, &e1, &e2, gate_cs) != 0) {
77b2bc2c 1705 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 1706 }
eaa728ee
FB
1707 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1708 /* must be code segment */
1709 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
20054ef0 1710 (DESC_S_MASK | DESC_CS_MASK))) {
77b2bc2c 1711 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 1712 }
eaa728ee 1713 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
20054ef0 1714 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
77b2bc2c 1715 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0
BS
1716 }
1717 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1718 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 1719 }
eaa728ee 1720 limit = get_seg_limit(e1, e2);
20054ef0 1721 if (new_eip > limit) {
77b2bc2c 1722 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1723 }
eaa728ee
FB
1724 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
1725 get_seg_base(e1, e2), limit, e2);
a78d0eab 1726 env->eip = new_eip;
eaa728ee
FB
1727 break;
1728 default:
77b2bc2c 1729 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
eaa728ee
FB
1730 break;
1731 }
1732 }
1733}
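/*
 * Sketch of the gate target decoding used above; the helper is
 * illustrative only.  A 286 gate (type 4) carries a 16 bit offset in
 * the low word of e1, a 386 gate (type 12) adds the high half from e2,
 * and the target selector always sits in the high word of e1.
 */
static inline uint32_t gate_offset_sketch(uint32_t e1, uint32_t e2,
                                          int is_386_gate)
{
    uint32_t offset = e1 & 0xffff;

    if (is_386_gate) {
        offset |= e2 & 0xffff0000;
    }
    return offset;
}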
1734
1735/* real mode call */
2999a0b2 1736void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
eaa728ee
FB
1737 int shift, int next_eip)
1738{
1739 int new_eip;
1740 uint32_t esp, esp_mask;
1741 target_ulong ssp;
1742
1743 new_eip = new_eip1;
08b3ded6 1744 esp = env->regs[R_ESP];
eaa728ee
FB
1745 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1746 ssp = env->segs[R_SS].base;
1747 if (shift) {
1748 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
1749 PUSHL(ssp, esp, esp_mask, next_eip);
1750 } else {
1751 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
1752 PUSHW(ssp, esp, esp_mask, next_eip);
1753 }
1754
1755 SET_ESP(esp, esp_mask);
1756 env->eip = new_eip;
1757 env->segs[R_CS].selector = new_cs;
1758 env->segs[R_CS].base = (new_cs << 4);
1759}
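/*
 * Illustrative only: after the pushes above, execution resumes at the
 * real mode linear address selector * 16 + offset, which is exactly
 * what the new_cs << 4 base assignment expresses.
 */
static inline uint32_t real_mode_linear_sketch(uint16_t cs, uint16_t ip)
{
    return ((uint32_t)cs << 4) + ip;
}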
1760
1761/* protected mode call */
2999a0b2 1762void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
eaa728ee
FB
1763 int shift, int next_eip_addend)
1764{
1765 int new_stack, i;
1766 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1c918eba 1767 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
eaa728ee
FB
1768 uint32_t val, limit, old_sp_mask;
1769 target_ulong ssp, old_ssp, next_eip;
1770
1771 next_eip = env->eip + next_eip_addend;
d12d51d5 1772 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
8995b7a0 1773 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
20054ef0 1774 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 1775 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1776 }
2999a0b2 1777 if (load_segment(env, &e1, &e2, new_cs) != 0) {
77b2bc2c 1778 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1779 }
eaa728ee 1780 cpl = env->hflags & HF_CPL_MASK;
d12d51d5 1781 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
eaa728ee 1782 if (e2 & DESC_S_MASK) {
20054ef0 1783 if (!(e2 & DESC_CS_MASK)) {
77b2bc2c 1784 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1785 }
eaa728ee
FB
1786 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1787 if (e2 & DESC_C_MASK) {
1788 /* conforming code segment */
20054ef0 1789 if (dpl > cpl) {
77b2bc2c 1790 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1791 }
eaa728ee
FB
1792 } else {
1793 /* non-conforming code segment */
1794 rpl = new_cs & 3;
20054ef0 1795 if (rpl > cpl) {
77b2bc2c 1796 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0
BS
1797 }
1798 if (dpl != cpl) {
77b2bc2c 1799 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1800 }
eaa728ee 1801 }
20054ef0 1802 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1803 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 1804 }
eaa728ee
FB
1805
1806#ifdef TARGET_X86_64
1807 /* XXX: check 16/32 bit cases in long mode */
1808 if (shift == 2) {
1809 target_ulong rsp;
20054ef0 1810
eaa728ee 1811 /* 64 bit case */
08b3ded6 1812 rsp = env->regs[R_ESP];
eaa728ee
FB
1813 PUSHQ(rsp, env->segs[R_CS].selector);
1814 PUSHQ(rsp, next_eip);
1815 /* from this point, not restartable */
08b3ded6 1816 env->regs[R_ESP] = rsp;
eaa728ee
FB
1817 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1818 get_seg_base(e1, e2),
1819 get_seg_limit(e1, e2), e2);
a78d0eab 1820 env->eip = new_eip;
eaa728ee
FB
1821 } else
1822#endif
1823 {
08b3ded6 1824 sp = env->regs[R_ESP];
eaa728ee
FB
1825 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1826 ssp = env->segs[R_SS].base;
1827 if (shift) {
1828 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1829 PUSHL(ssp, sp, sp_mask, next_eip);
1830 } else {
1831 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1832 PUSHW(ssp, sp, sp_mask, next_eip);
1833 }
1834
1835 limit = get_seg_limit(e1, e2);
20054ef0 1836 if (new_eip > limit) {
77b2bc2c 1837 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1838 }
eaa728ee
FB
1839 /* from this point, not restartable */
1840 SET_ESP(sp, sp_mask);
1841 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
1842 get_seg_base(e1, e2), limit, e2);
a78d0eab 1843 env->eip = new_eip;
eaa728ee
FB
1844 }
1845 } else {
1846 /* check gate type */
1847 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
1848 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1849 rpl = new_cs & 3;
20054ef0 1850 switch (type) {
eaa728ee
FB
1851 case 1: /* available 286 TSS */
1852 case 9: /* available 386 TSS */
1853 case 5: /* task gate */
20054ef0 1854 if (dpl < cpl || dpl < rpl) {
77b2bc2c 1855 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1856 }
2999a0b2 1857 switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
eaa728ee
FB
1858 return;
1859 case 4: /* 286 call gate */
1860 case 12: /* 386 call gate */
1861 break;
1862 default:
77b2bc2c 1863 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
eaa728ee
FB
1864 break;
1865 }
1866 shift = type >> 3;
1867
20054ef0 1868 if (dpl < cpl || dpl < rpl) {
77b2bc2c 1869 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 1870 }
eaa728ee 1871 /* check valid bit */
20054ef0 1872 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1873 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 1874 }
eaa728ee
FB
1875 selector = e1 >> 16;
1876 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
1877 param_count = e2 & 0x1f;
20054ef0 1878 if ((selector & 0xfffc) == 0) {
77b2bc2c 1879 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1880 }
eaa728ee 1881
2999a0b2 1882 if (load_segment(env, &e1, &e2, selector) != 0) {
77b2bc2c 1883 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
1884 }
1885 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 1886 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1887 }
eaa728ee 1888 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 1889 if (dpl > cpl) {
77b2bc2c 1890 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
1891 }
1892 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1893 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 1894 }
eaa728ee
FB
1895
1896 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
1897 /* to inner privilege */
2999a0b2 1898 get_ss_esp_from_tss(env, &ss, &sp, dpl);
90a2541b
LG
1899 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1900 TARGET_FMT_lx "\n", ss, sp, param_count,
1901 env->regs[R_ESP]);
20054ef0 1902 if ((ss & 0xfffc) == 0) {
77b2bc2c 1903 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
1904 }
1905 if ((ss & 3) != dpl) {
77b2bc2c 1906 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 1907 }
2999a0b2 1908 if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
77b2bc2c 1909 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 1910 }
eaa728ee 1911 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 1912 if (ss_dpl != dpl) {
77b2bc2c 1913 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 1914 }
eaa728ee
FB
1915 if (!(ss_e2 & DESC_S_MASK) ||
1916 (ss_e2 & DESC_CS_MASK) ||
20054ef0 1917 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 1918 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
1919 }
1920 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 1921 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 1922 }
eaa728ee 1923
20054ef0 1924 /* push_size = ((param_count * 2) + 8) << shift; */
eaa728ee
FB
1925
1926 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
1927 old_ssp = env->segs[R_SS].base;
1928
1929 sp_mask = get_sp_mask(ss_e2);
1930 ssp = get_seg_base(ss_e1, ss_e2);
1931 if (shift) {
1932 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
08b3ded6 1933 PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
20054ef0 1934 for (i = param_count - 1; i >= 0; i--) {
90a2541b
LG
1935 val = cpu_ldl_kernel(env, old_ssp +
1936 ((env->regs[R_ESP] + i * 4) &
1937 old_sp_mask));
eaa728ee
FB
1938 PUSHL(ssp, sp, sp_mask, val);
1939 }
1940 } else {
1941 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
08b3ded6 1942 PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
20054ef0 1943 for (i = param_count - 1; i >= 0; i--) {
90a2541b
LG
1944 val = cpu_lduw_kernel(env, old_ssp +
1945 ((env->regs[R_ESP] + i * 2) &
1946 old_sp_mask));
eaa728ee
FB
1947 PUSHW(ssp, sp, sp_mask, val);
1948 }
1949 }
1950 new_stack = 1;
1951 } else {
1952 /* to same privilege */
08b3ded6 1953 sp = env->regs[R_ESP];
eaa728ee
FB
1954 sp_mask = get_sp_mask(env->segs[R_SS].flags);
1955 ssp = env->segs[R_SS].base;
20054ef0 1956 /* push_size = (4 << shift); */
eaa728ee
FB
1957 new_stack = 0;
1958 }
1959
1960 if (shift) {
1961 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
1962 PUSHL(ssp, sp, sp_mask, next_eip);
1963 } else {
1964 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
1965 PUSHW(ssp, sp, sp_mask, next_eip);
1966 }
1967
1968 /* from this point, not restartable */
1969
1970 if (new_stack) {
1971 ss = (ss & ~3) | dpl;
1972 cpu_x86_load_seg_cache(env, R_SS, ss,
1973 ssp,
1974 get_seg_limit(ss_e1, ss_e2),
1975 ss_e2);
1976 }
1977
1978 selector = (selector & ~3) | dpl;
1979 cpu_x86_load_seg_cache(env, R_CS, selector,
1980 get_seg_base(e1, e2),
1981 get_seg_limit(e1, e2),
1982 e2);
eaa728ee 1983 SET_ESP(sp, sp_mask);
a78d0eab 1984 env->eip = offset;
eaa728ee 1985 }
eaa728ee
FB
1986}
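/*
 * To summarise the call gate path above: a gate that targets a more
 * privileged (lower DPL) non-conforming code segment switches to the
 * inner stack taken from the TSS for that DPL, pushes the old SS:ESP,
 * copies param_count stack words across, and only then pushes the
 * return CS:EIP; a gate that stays at the same privilege level reuses
 * the current stack and just pushes the return address.
 */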
1987
1988/* real and vm86 mode iret */
2999a0b2 1989void helper_iret_real(CPUX86State *env, int shift)
eaa728ee
FB
1990{
1991 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
1992 target_ulong ssp;
1993 int eflags_mask;
1994
20054ef0 1995 sp_mask = 0xffff; /* XXX: use SS segment size? */
08b3ded6 1996 sp = env->regs[R_ESP];
eaa728ee
FB
1997 ssp = env->segs[R_SS].base;
1998 if (shift == 1) {
1999 /* 32 bits */
2000 POPL(ssp, sp, sp_mask, new_eip);
2001 POPL(ssp, sp, sp_mask, new_cs);
2002 new_cs &= 0xffff;
2003 POPL(ssp, sp, sp_mask, new_eflags);
2004 } else {
2005 /* 16 bits */
2006 POPW(ssp, sp, sp_mask, new_eip);
2007 POPW(ssp, sp, sp_mask, new_cs);
2008 POPW(ssp, sp, sp_mask, new_eflags);
2009 }
08b3ded6 2010 env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2011 env->segs[R_CS].selector = new_cs;
2012 env->segs[R_CS].base = (new_cs << 4);
eaa728ee 2013 env->eip = new_eip;
20054ef0
BS
2014 if (env->eflags & VM_MASK) {
2015 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2016 NT_MASK;
2017 } else {
2018 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2019 RF_MASK | NT_MASK;
2020 }
2021 if (shift == 0) {
eaa728ee 2022 eflags_mask &= 0xffff;
20054ef0 2023 }
997ff0d9 2024 cpu_load_eflags(env, new_eflags, eflags_mask);
db620f46 2025 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2026}
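/*
 * In real mode the popped EFLAGS value is applied through a mask: a
 * 16 bit IRET only updates the low word, and when returning inside
 * VM86 the IOPL field is excluded so the guest cannot raise its own
 * I/O privilege level.
 */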
2027
2999a0b2 2028static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
eaa728ee
FB
2029{
2030 int dpl;
2031 uint32_t e2;
2032
2033 /* XXX: on x86_64, we do not want to nullify FS and GS because
2034 they may still contain a valid base. I would be interested to
2035 know how a real x86_64 CPU behaves */
2036 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 2037 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2038 return;
20054ef0 2039 }
eaa728ee
FB
2040
2041 e2 = env->segs[seg_reg].flags;
2042 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2043 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2044 /* data or non-conforming code segment */
2045 if (dpl < cpl) {
2046 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2047 }
2048 }
2049}
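/*
 * validate_seg() is used on the return path below: once the new CPL is
 * known, any data or non-conforming code segment register whose cached
 * DPL is below that CPL is reloaded with a null selector, except for
 * FS/GS that already hold a null selector (see the comment above about
 * preserving their base on x86_64).
 */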
2050
2051/* protected mode iret */
2999a0b2
BS
2052static inline void helper_ret_protected(CPUX86State *env, int shift,
2053 int is_iret, int addend)
eaa728ee
FB
2054{
2055 uint32_t new_cs, new_eflags, new_ss;
2056 uint32_t new_es, new_ds, new_fs, new_gs;
2057 uint32_t e1, e2, ss_e1, ss_e2;
2058 int cpl, dpl, rpl, eflags_mask, iopl;
2059 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2060
2061#ifdef TARGET_X86_64
20054ef0 2062 if (shift == 2) {
eaa728ee 2063 sp_mask = -1;
20054ef0 2064 } else
eaa728ee 2065#endif
20054ef0 2066 {
eaa728ee 2067 sp_mask = get_sp_mask(env->segs[R_SS].flags);
20054ef0 2068 }
08b3ded6 2069 sp = env->regs[R_ESP];
eaa728ee
FB
2070 ssp = env->segs[R_SS].base;
2071 new_eflags = 0; /* avoid warning */
2072#ifdef TARGET_X86_64
2073 if (shift == 2) {
2074 POPQ(sp, new_eip);
2075 POPQ(sp, new_cs);
2076 new_cs &= 0xffff;
2077 if (is_iret) {
2078 POPQ(sp, new_eflags);
2079 }
2080 } else
2081#endif
20054ef0
BS
2082 {
2083 if (shift == 1) {
2084 /* 32 bits */
2085 POPL(ssp, sp, sp_mask, new_eip);
2086 POPL(ssp, sp, sp_mask, new_cs);
2087 new_cs &= 0xffff;
2088 if (is_iret) {
2089 POPL(ssp, sp, sp_mask, new_eflags);
2090 if (new_eflags & VM_MASK) {
2091 goto return_to_vm86;
2092 }
2093 }
2094 } else {
2095 /* 16 bits */
2096 POPW(ssp, sp, sp_mask, new_eip);
2097 POPW(ssp, sp, sp_mask, new_cs);
2098 if (is_iret) {
2099 POPW(ssp, sp, sp_mask, new_eflags);
2100 }
eaa728ee 2101 }
eaa728ee 2102 }
d12d51d5
AL
2103 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2104 new_cs, new_eip, shift, addend);
8995b7a0 2105 LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
20054ef0 2106 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 2107 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2108 }
2999a0b2 2109 if (load_segment(env, &e1, &e2, new_cs) != 0) {
77b2bc2c 2110 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2111 }
eaa728ee 2112 if (!(e2 & DESC_S_MASK) ||
20054ef0 2113 !(e2 & DESC_CS_MASK)) {
77b2bc2c 2114 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2115 }
eaa728ee
FB
2116 cpl = env->hflags & HF_CPL_MASK;
2117 rpl = new_cs & 3;
20054ef0 2118 if (rpl < cpl) {
77b2bc2c 2119 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2120 }
eaa728ee
FB
2121 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2122 if (e2 & DESC_C_MASK) {
20054ef0 2123 if (dpl > rpl) {
77b2bc2c 2124 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2125 }
eaa728ee 2126 } else {
20054ef0 2127 if (dpl != rpl) {
77b2bc2c 2128 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2129 }
eaa728ee 2130 }
20054ef0 2131 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2132 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 2133 }
eaa728ee
FB
2134
2135 sp += addend;
2136 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2137 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
1235fc06 2138 /* return to same privilege level */
eaa728ee
FB
2139 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2140 get_seg_base(e1, e2),
2141 get_seg_limit(e1, e2),
2142 e2);
2143 } else {
2144 /* return to different privilege level */
2145#ifdef TARGET_X86_64
2146 if (shift == 2) {
2147 POPQ(sp, new_esp);
2148 POPQ(sp, new_ss);
2149 new_ss &= 0xffff;
2150 } else
2151#endif
20054ef0
BS
2152 {
2153 if (shift == 1) {
2154 /* 32 bits */
2155 POPL(ssp, sp, sp_mask, new_esp);
2156 POPL(ssp, sp, sp_mask, new_ss);
2157 new_ss &= 0xffff;
2158 } else {
2159 /* 16 bits */
2160 POPW(ssp, sp, sp_mask, new_esp);
2161 POPW(ssp, sp, sp_mask, new_ss);
2162 }
eaa728ee 2163 }
d12d51d5 2164 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
20054ef0 2165 new_ss, new_esp);
eaa728ee
FB
2166 if ((new_ss & 0xfffc) == 0) {
2167#ifdef TARGET_X86_64
20054ef0
BS
2168 /* NULL ss is allowed in long mode if cpl != 3 */
2169 /* XXX: test CS64? */
eaa728ee
FB
2170 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2171 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2172 0, 0xffffffff,
2173 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2174 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2175 DESC_W_MASK | DESC_A_MASK);
20054ef0 2176 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
eaa728ee
FB
2177 } else
2178#endif
2179 {
77b2bc2c 2180 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee
FB
2181 }
2182 } else {
20054ef0 2183 if ((new_ss & 3) != rpl) {
77b2bc2c 2184 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0 2185 }
2999a0b2 2186 if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
77b2bc2c 2187 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0 2188 }
eaa728ee
FB
2189 if (!(ss_e2 & DESC_S_MASK) ||
2190 (ss_e2 & DESC_CS_MASK) ||
20054ef0 2191 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 2192 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0 2193 }
eaa728ee 2194 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 2195 if (dpl != rpl) {
77b2bc2c 2196 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0
BS
2197 }
2198 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 2199 raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
20054ef0 2200 }
eaa728ee
FB
2201 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2202 get_seg_base(ss_e1, ss_e2),
2203 get_seg_limit(ss_e1, ss_e2),
2204 ss_e2);
2205 }
2206
2207 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2208 get_seg_base(e1, e2),
2209 get_seg_limit(e1, e2),
2210 e2);
eaa728ee
FB
2211 sp = new_esp;
2212#ifdef TARGET_X86_64
20054ef0 2213 if (env->hflags & HF_CS64_MASK) {
eaa728ee 2214 sp_mask = -1;
20054ef0 2215 } else
eaa728ee 2216#endif
20054ef0 2217 {
eaa728ee 2218 sp_mask = get_sp_mask(ss_e2);
20054ef0 2219 }
eaa728ee
FB
2220
2221 /* validate data segments */
2999a0b2
BS
2222 validate_seg(env, R_ES, rpl);
2223 validate_seg(env, R_DS, rpl);
2224 validate_seg(env, R_FS, rpl);
2225 validate_seg(env, R_GS, rpl);
eaa728ee
FB
2226
2227 sp += addend;
2228 }
2229 SET_ESP(sp, sp_mask);
2230 env->eip = new_eip;
2231 if (is_iret) {
2232 /* NOTE: 'cpl' is the _old_ CPL */
2233 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
20054ef0 2234 if (cpl == 0) {
eaa728ee 2235 eflags_mask |= IOPL_MASK;
20054ef0 2236 }
eaa728ee 2237 iopl = (env->eflags >> IOPL_SHIFT) & 3;
20054ef0 2238 if (cpl <= iopl) {
eaa728ee 2239 eflags_mask |= IF_MASK;
20054ef0
BS
2240 }
2241 if (shift == 0) {
eaa728ee 2242 eflags_mask &= 0xffff;
20054ef0 2243 }
997ff0d9 2244 cpu_load_eflags(env, new_eflags, eflags_mask);
eaa728ee
FB
2245 }
2246 return;
2247
2248 return_to_vm86:
2249 POPL(ssp, sp, sp_mask, new_esp);
2250 POPL(ssp, sp, sp_mask, new_ss);
2251 POPL(ssp, sp, sp_mask, new_es);
2252 POPL(ssp, sp, sp_mask, new_ds);
2253 POPL(ssp, sp, sp_mask, new_fs);
2254 POPL(ssp, sp, sp_mask, new_gs);
2255
2256 /* modify processor state */
997ff0d9
BS
2257 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2258 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2259 VIP_MASK);
2999a0b2 2260 load_seg_vm(env, R_CS, new_cs & 0xffff);
2999a0b2
BS
2261 load_seg_vm(env, R_SS, new_ss & 0xffff);
2262 load_seg_vm(env, R_ES, new_es & 0xffff);
2263 load_seg_vm(env, R_DS, new_ds & 0xffff);
2264 load_seg_vm(env, R_FS, new_fs & 0xffff);
2265 load_seg_vm(env, R_GS, new_gs & 0xffff);
eaa728ee
FB
2266
2267 env->eip = new_eip & 0xffff;
08b3ded6 2268 env->regs[R_ESP] = new_esp;
eaa728ee
FB
2269}
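/*
 * Sketch of the IRET eflags write mask computed above (illustrative
 * helper, not part of this file); 'cpl' is the old CPL and 'iopl' the
 * current IOPL field.
 */
static inline uint32_t iret_eflags_mask_sketch(int cpl, int iopl, int shift)
{
    uint32_t mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;

    if (cpl == 0) {
        mask |= IOPL_MASK;      /* only ring 0 may change IOPL */
    }
    if (cpl <= iopl) {
        mask |= IF_MASK;        /* IF is writable when CPL <= IOPL */
    }
    if (shift == 0) {
        mask &= 0xffff;         /* 16 bit IRET only touches FLAGS */
    }
    return mask;
}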
2270
2999a0b2 2271void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
eaa728ee
FB
2272{
2273 int tss_selector, type;
2274 uint32_t e1, e2;
2275
2276 /* specific case for TSS */
2277 if (env->eflags & NT_MASK) {
2278#ifdef TARGET_X86_64
20054ef0 2279 if (env->hflags & HF_LMA_MASK) {
77b2bc2c 2280 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 2281 }
eaa728ee 2282#endif
329e607d 2283 tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
20054ef0 2284 if (tss_selector & 4) {
77b2bc2c 2285 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 2286 }
2999a0b2 2287 if (load_segment(env, &e1, &e2, tss_selector) != 0) {
77b2bc2c 2288 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 2289 }
eaa728ee
FB
2290 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2291 /* NOTE: we check both segment and busy TSS */
20054ef0 2292 if (type != 3) {
77b2bc2c 2293 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 2294 }
2999a0b2 2295 switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
eaa728ee 2296 } else {
2999a0b2 2297 helper_ret_protected(env, shift, 1, 0);
eaa728ee 2298 }
db620f46 2299 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2300}
2301
2999a0b2 2302void helper_lret_protected(CPUX86State *env, int shift, int addend)
eaa728ee 2303{
2999a0b2 2304 helper_ret_protected(env, shift, 0, addend);
eaa728ee
FB
2305}
2306
2999a0b2 2307void helper_sysenter(CPUX86State *env)
eaa728ee
FB
2308{
2309 if (env->sysenter_cs == 0) {
77b2bc2c 2310 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee
FB
2311 }
2312 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2436b61a
AZ
2313
2314#ifdef TARGET_X86_64
2315 if (env->hflags & HF_LMA_MASK) {
2316 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2317 0, 0xffffffff,
2318 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2319 DESC_S_MASK |
20054ef0
BS
2320 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2321 DESC_L_MASK);
2436b61a
AZ
2322 } else
2323#endif
2324 {
2325 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2326 0, 0xffffffff,
2327 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2328 DESC_S_MASK |
2329 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2330 }
eaa728ee
FB
2331 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2332 0, 0xffffffff,
2333 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2334 DESC_S_MASK |
2335 DESC_W_MASK | DESC_A_MASK);
08b3ded6 2336 env->regs[R_ESP] = env->sysenter_esp;
a78d0eab 2337 env->eip = env->sysenter_eip;
eaa728ee
FB
2338}
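/*
 * SYSENTER builds flat ring 0 segments directly from the MSRs: CS comes
 * from IA32_SYSENTER_CS, SS is the selector immediately after it
 * (sysenter_cs + 8), and ESP/EIP are taken from sysenter_esp and
 * sysenter_eip; VM, IF and RF are cleared before entry.
 */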
2339
2999a0b2 2340void helper_sysexit(CPUX86State *env, int dflag)
eaa728ee
FB
2341{
2342 int cpl;
2343
2344 cpl = env->hflags & HF_CPL_MASK;
2345 if (env->sysenter_cs == 0 || cpl != 0) {
77b2bc2c 2346 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee 2347 }
2436b61a
AZ
2348#ifdef TARGET_X86_64
2349 if (dflag == 2) {
20054ef0
BS
2350 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2351 3, 0, 0xffffffff,
2436b61a
AZ
2352 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2353 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
20054ef0
BS
2354 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2355 DESC_L_MASK);
2356 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2357 3, 0, 0xffffffff,
2436b61a
AZ
2358 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2359 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2360 DESC_W_MASK | DESC_A_MASK);
2361 } else
2362#endif
2363 {
20054ef0
BS
2364 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2365 3, 0, 0xffffffff,
2436b61a
AZ
2366 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2367 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2368 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
20054ef0
BS
2369 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2370 3, 0, 0xffffffff,
2436b61a
AZ
2371 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2372 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2373 DESC_W_MASK | DESC_A_MASK);
2374 }
08b3ded6 2375 env->regs[R_ESP] = env->regs[R_ECX];
a78d0eab 2376 env->eip = env->regs[R_EDX];
eaa728ee
FB
2377}
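/*
 * SYSEXIT mirrors this layout at fixed offsets from sysenter_cs: the
 * ring 3 CS is at +16 (or +32 for a 64 bit return) and SS at +24 (or
 * +40), both with RPL 3, while the return ESP and EIP are taken from
 * ECX and EDX respectively.
 */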
2378
2999a0b2 2379target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2380{
2381 unsigned int limit;
2382 uint32_t e1, e2, eflags, selector;
2383 int rpl, dpl, cpl, type;
2384
2385 selector = selector1 & 0xffff;
f0967a1a 2386 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2387 if ((selector & 0xfffc) == 0) {
dc1ded53 2388 goto fail;
20054ef0 2389 }
2999a0b2 2390 if (load_segment(env, &e1, &e2, selector) != 0) {
eaa728ee 2391 goto fail;
20054ef0 2392 }
eaa728ee
FB
2393 rpl = selector & 3;
2394 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2395 cpl = env->hflags & HF_CPL_MASK;
2396 if (e2 & DESC_S_MASK) {
2397 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2398 /* conforming */
2399 } else {
20054ef0 2400 if (dpl < cpl || dpl < rpl) {
eaa728ee 2401 goto fail;
20054ef0 2402 }
eaa728ee
FB
2403 }
2404 } else {
2405 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2406 switch (type) {
eaa728ee
FB
2407 case 1:
2408 case 2:
2409 case 3:
2410 case 9:
2411 case 11:
2412 break;
2413 default:
2414 goto fail;
2415 }
2416 if (dpl < cpl || dpl < rpl) {
2417 fail:
2418 CC_SRC = eflags & ~CC_Z;
2419 return 0;
2420 }
2421 }
2422 limit = get_seg_limit(e1, e2);
2423 CC_SRC = eflags | CC_Z;
2424 return limit;
2425}
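/*
 * LSL reports its result through ZF: on success CC_SRC is set to
 * eflags | CC_Z and the segment limit is returned, while an unusable
 * selector returns 0 with ZF clear.  Besides ordinary segments it
 * accepts the TSS and LDT system types (1, 2, 3, 9, 11).
 */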
2426
2999a0b2 2427target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2428{
2429 uint32_t e1, e2, eflags, selector;
2430 int rpl, dpl, cpl, type;
2431
2432 selector = selector1 & 0xffff;
f0967a1a 2433 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2434 if ((selector & 0xfffc) == 0) {
eaa728ee 2435 goto fail;
20054ef0 2436 }
2999a0b2 2437 if (load_segment(env, &e1, &e2, selector) != 0) {
eaa728ee 2438 goto fail;
20054ef0 2439 }
eaa728ee
FB
2440 rpl = selector & 3;
2441 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2442 cpl = env->hflags & HF_CPL_MASK;
2443 if (e2 & DESC_S_MASK) {
2444 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2445 /* conforming */
2446 } else {
20054ef0 2447 if (dpl < cpl || dpl < rpl) {
eaa728ee 2448 goto fail;
20054ef0 2449 }
eaa728ee
FB
2450 }
2451 } else {
2452 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2453 switch (type) {
eaa728ee
FB
2454 case 1:
2455 case 2:
2456 case 3:
2457 case 4:
2458 case 5:
2459 case 9:
2460 case 11:
2461 case 12:
2462 break;
2463 default:
2464 goto fail;
2465 }
2466 if (dpl < cpl || dpl < rpl) {
2467 fail:
2468 CC_SRC = eflags & ~CC_Z;
2469 return 0;
2470 }
2471 }
2472 CC_SRC = eflags | CC_Z;
2473 return e2 & 0x00f0ff00;
2474}
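/*
 * LAR works the same way but returns the access rights bits instead of
 * the limit (e2 & 0x00f0ff00, i.e. type/S/DPL/P plus the AVL/L/D/G
 * nibble), and additionally accepts the gate types 4, 5 and 12 that
 * LSL rejects.
 */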
2475
2999a0b2 2476void helper_verr(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2477{
2478 uint32_t e1, e2, eflags, selector;
2479 int rpl, dpl, cpl;
2480
2481 selector = selector1 & 0xffff;
f0967a1a 2482 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2483 if ((selector & 0xfffc) == 0) {
eaa728ee 2484 goto fail;
20054ef0 2485 }
2999a0b2 2486 if (load_segment(env, &e1, &e2, selector) != 0) {
eaa728ee 2487 goto fail;
20054ef0
BS
2488 }
2489 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2490 goto fail;
20054ef0 2491 }
eaa728ee
FB
2492 rpl = selector & 3;
2493 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2494 cpl = env->hflags & HF_CPL_MASK;
2495 if (e2 & DESC_CS_MASK) {
20054ef0 2496 if (!(e2 & DESC_R_MASK)) {
eaa728ee 2497 goto fail;
20054ef0 2498 }
eaa728ee 2499 if (!(e2 & DESC_C_MASK)) {
20054ef0 2500 if (dpl < cpl || dpl < rpl) {
eaa728ee 2501 goto fail;
20054ef0 2502 }
eaa728ee
FB
2503 }
2504 } else {
2505 if (dpl < cpl || dpl < rpl) {
2506 fail:
2507 CC_SRC = eflags & ~CC_Z;
2508 return;
2509 }
2510 }
2511 CC_SRC = eflags | CC_Z;
2512}
2513
2999a0b2 2514void helper_verw(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2515{
2516 uint32_t e1, e2, eflags, selector;
2517 int rpl, dpl, cpl;
2518
2519 selector = selector1 & 0xffff;
f0967a1a 2520 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2521 if ((selector & 0xfffc) == 0) {
eaa728ee 2522 goto fail;
20054ef0 2523 }
2999a0b2 2524 if (load_segment(env, &e1, &e2, selector) != 0) {
eaa728ee 2525 goto fail;
20054ef0
BS
2526 }
2527 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2528 goto fail;
20054ef0 2529 }
eaa728ee
FB
2530 rpl = selector & 3;
2531 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2532 cpl = env->hflags & HF_CPL_MASK;
2533 if (e2 & DESC_CS_MASK) {
2534 goto fail;
2535 } else {
20054ef0 2536 if (dpl < cpl || dpl < rpl) {
eaa728ee 2537 goto fail;
20054ef0 2538 }
eaa728ee
FB
2539 if (!(e2 & DESC_W_MASK)) {
2540 fail:
2541 CC_SRC = eflags & ~CC_Z;
2542 return;
2543 }
2544 }
2545 CC_SRC = eflags | CC_Z;
2546}
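/*
 * VERR and VERW follow the same pattern: ZF is set when the selector
 * refers to a segment that is readable (VERR) or writable (VERW) at the
 * current CPL and the selector's RPL, and cleared otherwise; conforming
 * code segments skip the privilege comparison for VERR, and code
 * segments are never writable for VERW.
 */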
2547
f299f437 2548#if defined(CONFIG_USER_ONLY)
2999a0b2 2549void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
eaa728ee 2550{
f299f437 2551 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
b98dbc90 2552 int dpl = (env->eflags & VM_MASK) ? 3 : 0;
f299f437
BS
2553 selector &= 0xffff;
2554 cpu_x86_load_seg_cache(env, seg_reg, selector,
b98dbc90
PB
2555 (selector << 4), 0xffff,
2556 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2557 DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
f299f437 2558 } else {
2999a0b2 2559 helper_load_seg(env, seg_reg, selector);
13822781 2560 }
eaa728ee 2561}
eaa728ee 2562#endif
81cf8d8a
PB
2563
2564/* check if port I/O is allowed by the TSS I/O permission bitmap */
2565static inline void check_io(CPUX86State *env, int addr, int size)
2566{
2567 int io_offset, val, mask;
2568
2569 /* TSS must be a valid 32-bit one */
2570 if (!(env->tr.flags & DESC_P_MASK) ||
2571 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
2572 env->tr.limit < 103) {
2573 goto fail;
2574 }
2575 io_offset = cpu_lduw_kernel(env, env->tr.base + 0x66);
2576 io_offset += (addr >> 3);
2577 /* Note: the check needs two bytes */
2578 if ((io_offset + 1) > env->tr.limit) {
2579 goto fail;
2580 }
2581 val = cpu_lduw_kernel(env, env->tr.base + io_offset);
2582 val >>= (addr & 7);
2583 mask = (1 << size) - 1;
2584 /* all bits must be zero to allow the I/O */
2585 if ((val & mask) != 0) {
2586 fail:
2587 raise_exception_err(env, EXCP0D_GPF, 0);
2588 }
2589}
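/*
 * Sketch of the permission bitmap test performed above (illustrative
 * helper, not part of this file): 'bitmap_word' stands for the 16 bit
 * value fetched at tr.base + io_offset, and every bit covering the
 * access must be clear for the I/O to be allowed.
 */
static inline int io_bitmap_allows_sketch(uint16_t bitmap_word,
                                          int port, int size)
{
    int mask = (1 << size) - 1;    /* one bit per byte of the access */

    return ((bitmap_word >> (port & 7)) & mask) == 0;
}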
2590
2591void helper_check_iob(CPUX86State *env, uint32_t t0)
2592{
2593 check_io(env, t0, 1);
2594}
2595
2596void helper_check_iow(CPUX86State *env, uint32_t t0)
2597{
2598 check_io(env, t0, 2);
2599}
2600
2601void helper_check_iol(CPUX86State *env, uint32_t t0)
2602{
2603 check_io(env, t0, 4);
2604}