]> git.proxmox.com Git - mirror_qemu.git/blame - target-i386/seg_helper.c
spapr: Add DRC count indexed hotplug identifier type
[mirror_qemu.git] / target-i386 / seg_helper.c
CommitLineData
eaa728ee 1/*
10774999
BS
2 * x86 segmentation related helpers:
3 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
eaa728ee
FB
4 *
5 * Copyright (c) 2003 Fabrice Bellard
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
8167ee88 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
eaa728ee 19 */
83dae095 20
b6a0aa05 21#include "qemu/osdep.h"
3e457172 22#include "cpu.h"
1de7afc9 23#include "qemu/log.h"
2ef6175a 24#include "exec/helper-proto.h"
63c91552 25#include "exec/exec-all.h"
f08b6170 26#include "exec/cpu_ldst.h"
508127e2 27#include "exec/log.h"
eaa728ee 28
//#define DEBUG_PCALL

/* Tracing helpers for protected-mode call/interrupt paths.  When
 * DEBUG_PCALL is defined they forward to the qemu log (CPU_LOG_PCALL
 * mask); otherwise they compile away to nothing. */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
39
9220fe54
PM
40#ifdef CONFIG_USER_ONLY
41#define MEMSUFFIX _kernel
42#define DATA_SIZE 1
43#include "exec/cpu_ldst_useronly_template.h"
44
45#define DATA_SIZE 2
46#include "exec/cpu_ldst_useronly_template.h"
47
48#define DATA_SIZE 4
49#include "exec/cpu_ldst_useronly_template.h"
50
51#define DATA_SIZE 8
52#include "exec/cpu_ldst_useronly_template.h"
53#undef MEMSUFFIX
54#else
8a201bd4
PB
55#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
56#define MEMSUFFIX _kernel
57#define DATA_SIZE 1
58#include "exec/cpu_ldst_template.h"
59
60#define DATA_SIZE 2
61#include "exec/cpu_ldst_template.h"
62
63#define DATA_SIZE 4
64#include "exec/cpu_ldst_template.h"
65
66#define DATA_SIZE 8
67#include "exec/cpu_ldst_template.h"
68#undef CPU_MMU_INDEX
69#undef MEMSUFFIX
70#endif
71
/* return non zero if error */
/* Fetch the two descriptor words (e1 = low, e2 = high) for SELECTOR from
 * the GDT or LDT.  Returns -1 (without raising) when the selector index
 * is outside the table limit; the caller decides which fault to raise.
 * retaddr is the host return address used for exception unwinding. */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    /* TI bit (bit 2) selects LDT vs GDT */
    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    /* the full 8-byte descriptor must fit within the table limit */
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}
95
/* Convenience wrapper around load_segment_ra() for call sites that do
 * not need exception unwinding (retaddr == 0). */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
101
eaa728ee
FB
102static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
103{
104 unsigned int limit;
20054ef0 105
eaa728ee 106 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
20054ef0 107 if (e2 & DESC_G_MASK) {
eaa728ee 108 limit = (limit << 12) | 0xfff;
20054ef0 109 }
eaa728ee
FB
110 return limit;
111}
112
/* Reassemble the 32-bit segment base scattered across the descriptor:
 * bits 0-15 live in e1[31:16], bits 16-23 in e2[7:0], bits 24-31 in
 * e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base_low = e1 >> 16;
    uint32_t base_mid = (e2 & 0xff) << 16;
    uint32_t base_high = e2 & 0xff000000;

    return base_low | base_mid | base_high;
}
117
/* Fill a SegmentCache directly from raw descriptor words, with no
 * permission or presence checks (used for system segments such as the
 * LDT during a task switch). */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
125
/* init the segment cache in vm86 mode. */
/* In vm86 mode a selector is just a real-mode paragraph: base is
 * selector << 4, limit is 64K, and the segment is a present, writable,
 * accessed data segment at DPL 3. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
135
/* Read the inner-stack pointer (SS:ESP) for privilege level DPL out of
 * the current TSS.  The TSS type's high bit selects 16- vs 32-bit TSS
 * layout (shift 0 vs 1); raises #TS if the slot lies outside the TSS
 * limit, and aborts on an invalid TR (emulator bug, not guest fault). */
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    /* types 1 (16-bit TSS) and 9 (32-bit TSS) both have (type & 7) == 1 */
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    /* each privilege level's SS:ESP pair starts at offset (dpl*4+2)<<shift */
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        /* 16-bit TSS: SP then SS, 2 bytes each */
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        /* 32-bit TSS: ESP (4 bytes) then SS (2 bytes) */
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
177
/* Validate and load one segment register as part of a task switch.
 * All validation failures raise #TS (not #GP) because they occur in the
 * context of the incoming task; a not-present segment raises #NP.
 * A null selector is allowed except for CS and SS. */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        /* must be a code/data (non-system) descriptor */
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        /* presence is checked last so #NP carries the right selector */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* null selector: forbidden for CS and SS */
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
233
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by tss_selector
 * (descriptor words e1/e2).  source says how we got here (jmp/call/iret),
 * which controls busy-bit and NT/back-link handling.  next_eip is the
 * EIP to save in the outgoing TSS.  The statement order below mirrors
 * the architectural task-switch sequence and must not be reordered:
 * checks first, then save old state, then load new state, with the
 * fault-capable segment loads last. */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        /* the gate's selector field names the real TSS; it must be in the GDT */
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    /* type bit 3 distinguishes 32-bit (limit >= 103) from 16-bit (>= 43) TSS */
    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* store the back link and set NT so IRET can return to us */
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        /* the LDT selector must reference the GDT (TI bit clear) */
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}
511
/* Convenience wrapper around switch_tss_ra() for call sites that do
 * not need exception unwinding (retaddr == 0). */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
518
eaa728ee
FB
519static inline unsigned int get_sp_mask(unsigned int e2)
520{
20054ef0 521 if (e2 & DESC_B_MASK) {
eaa728ee 522 return 0xffffffff;
20054ef0 523 } else {
eaa728ee 524 return 0xffff;
20054ef0 525 }
eaa728ee
FB
526}
527
/* Return 1 for the exception vectors that push an error code:
 * 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP), 14 (#PF), 17 (#AC). */
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    default:
        return 0;
    }
}
542
/* Update ESP/RSP under sp_mask, preserving the bits outside the mask.
 * The 64-bit build special-cases the common masks so a full 64-bit RSP
 * can be written when sp_mask covers the whole register. */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif
562
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
/* Stack push/pop helpers used by the interrupt/call-gate paths.  They
 * update the local `sp` variable (not env->regs[R_ESP]) so the caller
 * can commit the new stack pointer atomically with SET_ESP afterwards.
 * `env` must be in scope at the expansion site. */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                      \
    {                                                            \
        sp -= 2;                                                 \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                       \
    {                                                            \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                 \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                       \
    {                                                            \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                 \
    }

/* Variants without a host return address (no exception unwinding). */
#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
596
/* protected mode interrupt */
/* Deliver interrupt/exception INTNO in protected mode through the IDT.
 * is_int: software INT instruction (privilege-checked against the gate
 * DPL); is_hw: external hardware interrupt (no error code even for
 * vectors that normally push one).  next_eip is the return EIP for
 * software interrupts.  Handles task gates, 286/386 interrupt and trap
 * gates, inner-privilege stack switches, and vm86 transitions. */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    /* IDT error codes have bit 1 (IDT flag) set: intno * 8 + 2 */
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    /* target must be a code segment */
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* gate type bit 3: 1 => 386 (32-bit pushes), 0 => 286 (16-bit pushes) */
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            /* vm86 data segments are cleared before entering the handler */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
812
813#ifdef TARGET_X86_64
814
/* 64-bit stack push/pop helpers (long mode: flat stack, no sp_mask).
 * As with PUSHW/PUSHL, only the local `sp` variable is updated. */
#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

/* Variants without a host return address (no exception unwinding). */
#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
829
/* Read the 64-bit RSPn entry for privilege LEVEL from the 64-bit TSS.
 * Raises #TS when the entry lies outside the TSS limit; aborts on an
 * invalid TR (emulator bug, not a guest fault). */
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    /* RSP0..RSP2 live at offsets 4, 12, 20 in the 64-bit TSS */
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
849
/* 64 bit interrupt: deliver interrupt/exception 'intno' in long mode.
 *
 * is_int is true for a software INT; next_eip is then the address of
 * the following instruction (pushed as the return EIP). is_hw is true
 * for hardware interrupts. error_code is pushed only for exceptions
 * that define one. All descriptor checks below follow the long-mode
 * IDT gate rules; each failed check raises #GP/#NP/#TS with the
 * architecturally defined error code, so the order of checks matters.
 */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    /* long-mode IDT entries are 16 bytes */
    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    /* 64-bit gate target offset is split across all three words */
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    /* target must be a 64-bit code segment (L set, D clear) */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        /* IST slots follow RSP0-2 in the TSS, hence the +3 bias */
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }
    esp &= ~0xfLL; /* align stack */

    /* build the 64-bit interrupt stack frame (SS:RSP always pushed) */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        /* long mode loads a NULL SS with the new privilege's RPL */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
970#endif
971
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
/* User-mode emulation: SYSCALL is reported to the outer loop as a fake
 * exception; the host-side syscall emulation handles it there. */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
/* System emulation: full SYSCALL semantics. Loads CS/SS from STAR[47:32],
 * saves the return RIP in RCX (and RFLAGS in R11 in long mode), masks
 * RFLAGS with FMASK, and jumps to LSTAR/CSTAR (long mode) or STAR[31:0]
 * (legacy mode). Raises #UD if EFER.SCE is clear. */
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* return address in RCX, old RFLAGS in R11 */
        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        /* flat 64-bit kernel code segment */
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* LSTAR for 64-bit callers, CSTAR for compatibility mode */
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        /* legacy mode: 32-bit return address, fixed flag clearing */
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
eaa728ee 1036
#ifdef TARGET_X86_64
/* SYSRET: return to user mode from a SYSCALL. dflag == 2 selects a
 * 64-bit return (CS from STAR[63:48]+16, RIP from RCX); otherwise a
 * 32-bit return. In long mode RFLAGS is restored from R11. Raises #UD
 * if EFER.SCE is clear and #GP(0) if not in protected mode at CPL 0.
 */
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        /* restore user RFLAGS from R11 (selected bits only) */
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            /* 64-bit user code segment lives 16 bytes past the selector */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        /* legacy mode: only IF is restored */
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif
eaa728ee
FB
1091
1092/* real mode interrupt */
2999a0b2
BS
1093static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
1094 int error_code, unsigned int next_eip)
eaa728ee
FB
1095{
1096 SegmentCache *dt;
1097 target_ulong ptr, ssp;
1098 int selector;
1099 uint32_t offset, esp;
1100 uint32_t old_cs, old_eip;
eaa728ee 1101
20054ef0 1102 /* real mode (simpler!) */
eaa728ee 1103 dt = &env->idt;
20054ef0 1104 if (intno * 4 + 3 > dt->limit) {
77b2bc2c 1105 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 1106 }
eaa728ee 1107 ptr = dt->base + intno * 4;
329e607d
BS
1108 offset = cpu_lduw_kernel(env, ptr);
1109 selector = cpu_lduw_kernel(env, ptr + 2);
08b3ded6 1110 esp = env->regs[R_ESP];
eaa728ee 1111 ssp = env->segs[R_SS].base;
20054ef0 1112 if (is_int) {
eaa728ee 1113 old_eip = next_eip;
20054ef0 1114 } else {
eaa728ee 1115 old_eip = env->eip;
20054ef0 1116 }
eaa728ee 1117 old_cs = env->segs[R_CS].selector;
20054ef0 1118 /* XXX: use SS segment size? */
997ff0d9 1119 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
eaa728ee
FB
1120 PUSHW(ssp, esp, 0xffff, old_cs);
1121 PUSHW(ssp, esp, 0xffff, old_eip);
1122
1123 /* update processor state */
08b3ded6 1124 env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
eaa728ee
FB
1125 env->eip = offset;
1126 env->segs[R_CS].selector = selector;
1127 env->segs[R_CS].base = (selector << 4);
1128 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1129}
1130
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt. is_int is TRUE if coming from the int
 * instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE or if intno
 * is EXCP_SYSCALL.
 */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    if (is_int) {
        SegmentCache *dt;
        target_ulong ptr;
        int dpl, cpl, shift;
        uint32_t e2;

        /* Even in user-only mode, honor the IDT gate DPL check so that
           an INT the kernel would forbid raises #GP here too. */
        dt = &env->idt;
        if (env->hflags & HF_LMA_MASK) {
            shift = 4;  /* 16-byte IDT entries in long mode */
        } else {
            shift = 3;
        }
        ptr = dt->base + (intno << shift);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        /* check privilege if software int */
        if (dpl < cpl) {
            raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
        }
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}
1170
e694d4e2
BS
1171#else
1172
2999a0b2
BS
1173static void handle_even_inj(CPUX86State *env, int intno, int is_int,
1174 int error_code, int is_hw, int rm)
2ed51f5b 1175{
19d6ca16 1176 CPUState *cs = CPU(x86_env_get_cpu(env));
b216aa6c 1177 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
20054ef0
BS
1178 control.event_inj));
1179
2ed51f5b 1180 if (!(event_inj & SVM_EVTINJ_VALID)) {
20054ef0
BS
1181 int type;
1182
1183 if (is_int) {
1184 type = SVM_EVTINJ_TYPE_SOFT;
1185 } else {
1186 type = SVM_EVTINJ_TYPE_EXEPT;
1187 }
1188 event_inj = intno | type | SVM_EVTINJ_VALID;
1189 if (!rm && exception_has_error_code(intno)) {
1190 event_inj |= SVM_EVTINJ_VALID_ERR;
b216aa6c 1191 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
20054ef0
BS
1192 control.event_inj_err),
1193 error_code);
1194 }
b216aa6c 1195 x86_stl_phys(cs,
ab1da857 1196 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
20054ef0 1197 event_inj);
2ed51f5b
AL
1198 }
1199}
00ea18d1 1200#endif
2ed51f5b 1201
eaa728ee
FB
1202/*
1203 * Begin execution of an interruption. is_int is TRUE if coming from
a78d0eab 1204 * the int instruction. next_eip is the env->eip value AFTER the interrupt
eaa728ee
FB
1205 * instruction. It is only relevant if is_int is TRUE.
1206 */
ca4c810a 1207static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
2999a0b2 1208 int error_code, target_ulong next_eip, int is_hw)
eaa728ee 1209{
ca4c810a
AF
1210 CPUX86State *env = &cpu->env;
1211
8fec2b8c 1212 if (qemu_loglevel_mask(CPU_LOG_INT)) {
eaa728ee
FB
1213 if ((env->cr[0] & CR0_PE_MASK)) {
1214 static int count;
20054ef0
BS
1215
1216 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1217 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1218 count, intno, error_code, is_int,
1219 env->hflags & HF_CPL_MASK,
a78d0eab
LG
1220 env->segs[R_CS].selector, env->eip,
1221 (int)env->segs[R_CS].base + env->eip,
08b3ded6 1222 env->segs[R_SS].selector, env->regs[R_ESP]);
eaa728ee 1223 if (intno == 0x0e) {
93fcfe39 1224 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
eaa728ee 1225 } else {
4b34e3ad 1226 qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
eaa728ee 1227 }
93fcfe39 1228 qemu_log("\n");
a0762859 1229 log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
eaa728ee
FB
1230#if 0
1231 {
1232 int i;
9bd5494e 1233 target_ulong ptr;
20054ef0 1234
93fcfe39 1235 qemu_log(" code=");
eaa728ee 1236 ptr = env->segs[R_CS].base + env->eip;
20054ef0 1237 for (i = 0; i < 16; i++) {
93fcfe39 1238 qemu_log(" %02x", ldub(ptr + i));
eaa728ee 1239 }
93fcfe39 1240 qemu_log("\n");
eaa728ee
FB
1241 }
1242#endif
1243 count++;
1244 }
1245 }
1246 if (env->cr[0] & CR0_PE_MASK) {
00ea18d1 1247#if !defined(CONFIG_USER_ONLY)
20054ef0 1248 if (env->hflags & HF_SVMI_MASK) {
2999a0b2 1249 handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
20054ef0 1250 }
00ea18d1 1251#endif
eb38c52c 1252#ifdef TARGET_X86_64
eaa728ee 1253 if (env->hflags & HF_LMA_MASK) {
2999a0b2 1254 do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
eaa728ee
FB
1255 } else
1256#endif
1257 {
2999a0b2
BS
1258 do_interrupt_protected(env, intno, is_int, error_code, next_eip,
1259 is_hw);
eaa728ee
FB
1260 }
1261 } else {
00ea18d1 1262#if !defined(CONFIG_USER_ONLY)
20054ef0 1263 if (env->hflags & HF_SVMI_MASK) {
2999a0b2 1264 handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
20054ef0 1265 }
00ea18d1 1266#endif
2999a0b2 1267 do_interrupt_real(env, intno, is_int, error_code, next_eip);
eaa728ee 1268 }
2ed51f5b 1269
00ea18d1 1270#if !defined(CONFIG_USER_ONLY)
2ed51f5b 1271 if (env->hflags & HF_SVMI_MASK) {
fdfba1a2 1272 CPUState *cs = CPU(cpu);
b216aa6c 1273 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
20054ef0
BS
1274 offsetof(struct vmcb,
1275 control.event_inj));
1276
b216aa6c 1277 x86_stl_phys(cs,
ab1da857 1278 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
20054ef0 1279 event_inj & ~SVM_EVTINJ_VALID);
2ed51f5b 1280 }
00ea18d1 1281#endif
eaa728ee
FB
1282}
1283
97a8ea5a 1284void x86_cpu_do_interrupt(CPUState *cs)
e694d4e2 1285{
97a8ea5a
AF
1286 X86CPU *cpu = X86_CPU(cs);
1287 CPUX86State *env = &cpu->env;
1288
e694d4e2
BS
1289#if defined(CONFIG_USER_ONLY)
1290 /* if user mode only, we simulate a fake exception
1291 which will be handled outside the cpu execution
1292 loop */
27103424 1293 do_interrupt_user(env, cs->exception_index,
e694d4e2
BS
1294 env->exception_is_int,
1295 env->error_code,
1296 env->exception_next_eip);
1297 /* successfully delivered */
1298 env->old_exception = -1;
1299#else
1300 /* simulate a real cpu exception. On i386, it can
1301 trigger new exceptions, but we do not handle
1302 double or triple faults yet. */
27103424 1303 do_interrupt_all(cpu, cs->exception_index,
e694d4e2
BS
1304 env->exception_is_int,
1305 env->error_code,
1306 env->exception_next_eip, 0);
1307 /* successfully delivered */
1308 env->old_exception = -1;
1309#endif
e694d4e2
BS
1310}
1311
2999a0b2 1312void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
e694d4e2 1313{
ca4c810a 1314 do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
e694d4e2
BS
1315}
1316
/* CPUClass::cpu_exec_interrupt hook: examine pending interrupt_request
 * bits in architectural priority order (POLL, SIPI, SMI, NMI, MCE,
 * hard IRQ, virtual IRQ) and service at most one request per call.
 * Returns true if an interrupt was taken (the TB must be abandoned).
 */
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        /* Don't process multiple interrupt requests in a single call.
           This is required to make icount-driven execution deterministic. */
        return true;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
    } else if (env->hflags2 & HF2_GIF_MASK) {
        /* all remaining events are gated by the SVM global interrupt flag */
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            /* block further NMIs until the next IRET */
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;
            /* FIXME: this should respect TPR */
            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0);
            intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}
1388
/* LLDT: load the LDT register from GDT entry 'selector'. A null
 * selector simply invalidates the LDT. Raises #GP for an LDT-relative
 * selector, out-of-range index, or wrong descriptor type; #NP if the
 * descriptor is not present. In long mode the descriptor is 16 bytes
 * and carries the upper 32 bits of the base.
 */
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* the LDT descriptor must come from the GDT (TI bit clear) */
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
1442
/* LTR: load the task register from GDT entry 'selector' and mark the
 * TSS descriptor busy. A null selector invalidates TR. Raises #GP for
 * an LDT-relative selector, out-of-range index, or a descriptor that
 * is not an available (16- or 32/64-bit) TSS; #NP if not present.
 */
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* the TSS descriptor must come from the GDT (TI bit clear) */
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* type 1 = available 286 TSS, type 9 = available 386/64 TSS */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            /* 16-byte descriptor: upper base in e3, e4 type bits must be 0 */
            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the GDT */
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
1505
/* only works if protected mode and not VM86. seg_reg must be != R_CS
 *
 * Load a data/stack segment register with full protected-mode checks:
 * null-selector rules (forbidden for SS except in 64-bit CPL<3),
 * descriptor table bounds, writability (SS) / readability, privilege
 * checks, and presence. Sets the descriptor's accessed bit on first use.
 */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        /* TI bit selects LDT vs GDT */
        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* a not-present stack segment raises #SS, others #NP */
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
1593
/* protected mode jump
 *
 * Implements far JMP through a code segment, call gate, TSS, or task
 * gate. For a direct code-segment target, performs the conforming /
 * non-conforming privilege checks and the EIP limit check before
 * loading CS. Gate targets re-validate the gate's target code segment.
 * next_eip is only used when the jump triggers a task switch.
 */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        /* limit check skipped in long mode (L-bit code segment) */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            /* target CS and EIP come from the gate descriptor */
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
1697
1698/* real mode call */
2999a0b2 1699void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
eaa728ee
FB
1700 int shift, int next_eip)
1701{
1702 int new_eip;
1703 uint32_t esp, esp_mask;
1704 target_ulong ssp;
1705
1706 new_eip = new_eip1;
08b3ded6 1707 esp = env->regs[R_ESP];
eaa728ee
FB
1708 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1709 ssp = env->segs[R_SS].base;
1710 if (shift) {
100ec099
PD
1711 PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1712 PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
eaa728ee 1713 } else {
100ec099
PD
1714 PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
1715 PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
eaa728ee
FB
1716 }
1717
1718 SET_ESP(esp, esp_mask);
1719 env->eip = new_eip;
1720 env->segs[R_CS].selector = new_cs;
1721 env->segs[R_CS].base = (new_cs << 4);
1722}
1723
/* protected mode call */
/* Protected-mode far call through a code segment descriptor, call gate,
 * task gate, or TSS.  Performs the full privilege checks and, for a call
 * gate to an inner privilege level, switches stacks and copies
 * param_count parameters from the old stack to the new one.
 * Raises #GP/#NP/#TS (via raise_exception_err_ra) on any check failure;
 * checks are ordered so that faults occur before any architectural state
 * is modified ("not restartable" markers below).
 */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp;

    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    /* a null selector is never a valid call target */
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment */
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            /* target offset must lie within the new segment's limit */
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* system descriptor: check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        /* gate type bit 3 distinguishes 286 (16-bit) from 386 (32-bit) */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        /* extract the gate's target CS:offset and parameter count */
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        /* note: e1/e2 are reused for the gate's target code descriptor */
        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege: fetch the inner stack from the TSS,
               validate it, then copy param_count parameters across */
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            /* new SS must be a present, writable data segment */
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                /* 32-bit gate: push old SS:ESP, then copy dword params */
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                /* 16-bit gate: push old SS:SP, then copy word params */
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        /* push the return address on the (possibly new) stack */
        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        /* the RPL of the loaded CS becomes the new privilege level */
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
1949
/* real and vm86 mode iret */
/* Pops IP, CS and FLAGS (16- or 32-bit depending on 'shift') without any
 * descriptor checks.  In vm86 mode IOPL is not restorable from the stack
 * image, hence the narrower eflags_mask.  Also clears the NMI blocking
 * flag (HF2_NMI_MASK).
 */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    /* only the low 16 bits of ESP move in real mode */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        /* vm86: IOPL is privileged, so it is not restored from the stack */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        /* 16-bit iret only updates the low half of EFLAGS */
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
1989
2999a0b2 1990static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
eaa728ee
FB
1991{
1992 int dpl;
1993 uint32_t e2;
1994
1995 /* XXX: on x86_64, we do not want to nullify FS and GS because
1996 they may still contain a valid base. I would be interested to
1997 know how a real x86_64 CPU behaves */
1998 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 1999 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2000 return;
20054ef0 2001 }
eaa728ee
FB
2002
2003 e2 = env->segs[seg_reg].flags;
2004 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2005 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2006 /* data or non conforming code segment */
2007 if (dpl < cpl) {
2008 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2009 }
2010 }
2011}
2012
/* protected mode iret */
/* Common implementation of protected-mode far return (lret) and iret.
 *
 * shift:   0 = 16-bit, 1 = 32-bit, 2 = 64-bit operand size
 * is_iret: non-zero to additionally pop and restore EFLAGS
 * addend:  extra bytes to discard from the stack (lret imm16)
 * retaddr: host return address for exception unwinding
 *
 * Handles same-privilege return, outer-privilege return with SS:ESP
 * restore and data-segment validation, and (for 32-bit iret) the
 * return-to-vm86 path when the popped EFLAGS has VM set.
 */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        /* 64-bit: RSP is used unmasked */
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    /* iret with VM set in the popped EFLAGS resumes vm86 */
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    /* target must be a code segment */
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* a return can never raise privilege */
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level: also pop SS:ESP */
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            /* new SS must be a present, writable data segment at RPL */
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            /* only ring 0 may change IOPL */
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            /* IF may only change when CPL <= IOPL */
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* iret back to vm86: pop ESP, SS and all data segment selectors */
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
2233
/* Protected-mode iret.  If EFLAGS.NT is set this is a task return: the
 * back-link selector is read from the current TSS and a task switch is
 * performed.  Otherwise it defers to helper_ret_protected() with
 * is_iret=1.  In both cases NMI blocking is cleared afterwards.
 */
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* nested-task iret is not valid in long mode */
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        /* back-link selector lives at offset 0 of the current TSS */
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            /* must be in the GDT, not the LDT */
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
2264
2999a0b2 2265void helper_lret_protected(CPUX86State *env, int shift, int addend)
eaa728ee 2266{
100ec099 2267 helper_ret_protected(env, shift, 0, addend, GETPC());
eaa728ee
FB
2268}
2269
/* SYSENTER: fast transition to ring 0 using the MSR-configured
 * SYSENTER_CS/ESP/EIP values.  Faults with #GP(0) if SYSENTER_CS is
 * unconfigured.  SS is derived from SYSENTER_CS + 8; in long mode CS is
 * loaded as a 64-bit (L-bit) segment.
 */
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: target CS is a 64-bit code segment */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    /* flat writable stack segment at SYSENTER_CS + 8 */
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
2302
/* SYSEXIT: fast return to ring 3.  'dflag' selects operand size
 * (2 = 64-bit).  Faults with #GP(0) if SYSENTER_CS is unconfigured or
 * the caller is not ring 0.  The ring-3 CS/SS selectors are derived
 * from SYSENTER_CS at fixed offsets (+16/+24 legacy, +32/+40 64-bit);
 * return ESP comes from ECX and return EIP from EDX.
 */
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit return: CS at +32, SS at +40, CS has L bit set */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        /* legacy return: CS at +16, SS at +24 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
2341
/* LSL: load the segment limit of 'selector1' if the descriptor is
 * accessible from the current privilege level.  Sets ZF (via CC_SRC) and
 * returns the limit on success; clears ZF and returns 0 on failure.
 * Never faults except on the descriptor-table access itself.
 */
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* capture current flags so only ZF is modified below */
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* non-conforming: must be visible at both CPL and RPL */
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        /* system descriptor: only TSS and LDT types have a limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
2389
/* LAR: load the access-rights bytes of 'selector1' if the descriptor is
 * accessible from the current privilege level.  Sets ZF (via CC_SRC) and
 * returns e2 masked to the access-rights bits on success; clears ZF and
 * returns 0 on failure.
 */
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* capture current flags so only ZF is modified below */
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* non-conforming: must be visible at both CPL and RPL */
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        /* system descriptor: LAR accepts more types than LSL
           (also call gates and task gates) */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
2438
/* VERR: set ZF (via CC_SRC) if the segment named by 'selector1' is
 * readable from the current privilege level, clear ZF otherwise.
 * Conforming code segments skip the DPL/RPL check.
 */
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    /* capture current flags so only ZF is modified below */
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    /* system segments are never readable via VERR */
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segment: must have the readable bit */
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
2476
2999a0b2 2477void helper_verw(CPUX86State *env, target_ulong selector1)
eaa728ee
FB
2478{
2479 uint32_t e1, e2, eflags, selector;
2480 int rpl, dpl, cpl;
2481
2482 selector = selector1 & 0xffff;
f0967a1a 2483 eflags = cpu_cc_compute_all(env, CC_OP);
20054ef0 2484 if ((selector & 0xfffc) == 0) {
eaa728ee 2485 goto fail;
20054ef0 2486 }
100ec099 2487 if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
eaa728ee 2488 goto fail;
20054ef0
BS
2489 }
2490 if (!(e2 & DESC_S_MASK)) {
eaa728ee 2491 goto fail;
20054ef0 2492 }
eaa728ee
FB
2493 rpl = selector & 3;
2494 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2495 cpl = env->hflags & HF_CPL_MASK;
2496 if (e2 & DESC_CS_MASK) {
2497 goto fail;
2498 } else {
20054ef0 2499 if (dpl < cpl || dpl < rpl) {
eaa728ee 2500 goto fail;
20054ef0 2501 }
eaa728ee
FB
2502 if (!(e2 & DESC_W_MASK)) {
2503 fail:
2504 CC_SRC = eflags & ~CC_Z;
2505 return;
2506 }
2507 }
2508 CC_SRC = eflags | CC_Z;
2509}
2510
#if defined(CONFIG_USER_ONLY)
/* User-mode segment load: real/vm86 mode loads the selector verbatim
 * (base = selector << 4), protected mode goes through the full
 * descriptor-based helper_load_seg(). */
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    bool vm86 = (env->eflags & VM_MASK) != 0;

    if ((env->cr[0] & CR0_PE_MASK) && !vm86) {
        /* protected mode: full descriptor load with privilege checks */
        helper_load_seg(env, seg_reg, selector);
        return;
    }

    /* real or vm86 mode: flat 64 KiB writable segment at selector * 16;
       vm86 segments carry DPL 3 */
    uint32_t sel = selector & 0xffff;
    int dpl = vm86 ? 3 : 0;

    cpu_x86_load_seg_cache(env, seg_reg, sel,
                           sel << 4, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
}
#endif
81cf8d8a
PB
2526
/* check if Port I/O is allowed in TSS */
/* Consults the I/O permission bitmap of the current 32-bit TSS for a
 * 'size'-byte access at port 'addr'; raises #GP(0) if the TSS is not a
 * valid 32-bit TSS (type 9, present, limit >= 103) or if any of the
 * corresponding bitmap bits is set.
 */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    /* I/O map base lives at offset 0x66 of the TSS */
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
2554
2555void helper_check_iob(CPUX86State *env, uint32_t t0)
2556{
100ec099 2557 check_io(env, t0, 1, GETPC());
81cf8d8a
PB
2558}
2559
2560void helper_check_iow(CPUX86State *env, uint32_t t0)
2561{
100ec099 2562 check_io(env, t0, 2, GETPC());
81cf8d8a
PB
2563}
2564
2565void helper_check_iol(CPUX86State *env, uint32_t t0)
2566{
100ec099 2567 check_io(env, t0, 4, GETPC());
81cf8d8a 2568}