1/*
2 * i386 helpers
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
eaa728ee 18 */
83dae095 19
20#include "cpu.h"
21#include "dyngen-exec.h"
35bed8ee 22#include "ioport.h"
23#include "qemu-log.h"
24#include "cpu-defs.h"
25#include "helper.h"
eaa728ee 26
27#if !defined(CONFIG_USER_ONLY)
28#include "softmmu_exec.h"
29#endif /* !defined(CONFIG_USER_ONLY) */
eaa728ee 30
3e457172 31//#define DEBUG_PCALL
32
33#ifdef DEBUG_PCALL
34# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
35# define LOG_PCALL_STATE(env) \
36 log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
d12d51d5 37#else
38# define LOG_PCALL(...) do { } while (0)
39# define LOG_PCALL_STATE(env) do { } while (0)
40#endif
41
42/* broken thread support */
43
c227f099 44static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
45
46void helper_lock(void)
47{
48 spin_lock(&global_cpu_lock);
49}
50
51void helper_unlock(void)
52{
53 spin_unlock(&global_cpu_lock);
54}
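/* helper_lock()/helper_unlock() are generated by the translator around
   LOCK-prefixed instructions, so atomic read-modify-write operations are
   serialized through this single global spinlock rather than being made
   truly atomic per memory location. */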
55
56/* return non-zero on error */
57static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
58 int selector)
59{
60 SegmentCache *dt;
61 int index;
62 target_ulong ptr;
63
20054ef0 64 if (selector & 0x4) {
eaa728ee 65 dt = &env->ldt;
20054ef0 66 } else {
eaa728ee 67 dt = &env->gdt;
20054ef0 68 }
eaa728ee 69 index = selector & ~7;
20054ef0 70 if ((index + 7) > dt->limit) {
eaa728ee 71 return -1;
20054ef0 72 }
73 ptr = dt->base + index;
74 *e1_ptr = ldl_kernel(ptr);
75 *e2_ptr = ldl_kernel(ptr + 4);
76 return 0;
77}
78
79static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
80{
81 unsigned int limit;
20054ef0 82
eaa728ee 83 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
20054ef0 84 if (e2 & DESC_G_MASK) {
eaa728ee 85 limit = (limit << 12) | 0xfff;
20054ef0 86 }
87 return limit;
88}
89
90static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
91{
20054ef0 92 return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
93}
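/* Descriptor decoding reminder: e1 is the low dword and e2 the high dword
   of an 8-byte descriptor. The 20-bit limit sits in e1[15:0] and e2[19:16]
   (scaled to 4K pages when the G bit is set), and the 32-bit base is split
   across e1[31:16], e2[7:0] and e2[31:24]. */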
94
95static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
96 uint32_t e2)
97{
98 sc->base = get_seg_base(e1, e2);
99 sc->limit = get_seg_limit(e1, e2);
100 sc->flags = e2;
101}
102
103/* init the segment cache in vm86 mode. */
104static inline void load_seg_vm(int seg, int selector)
105{
106 selector &= 0xffff;
107 cpu_x86_load_seg_cache(env, seg, selector,
108 (selector << 4), 0xffff, 0);
109}
110
111static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
112 uint32_t *esp_ptr, int dpl)
113{
114 int type, index, shift;
115
116#if 0
117 {
118 int i;
119 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
20054ef0 120 for (i = 0; i < env->tr.limit; i++) {
eaa728ee 121 printf("%02x ", env->tr.base[i]);
122 if ((i & 7) == 7) {
123 printf("\n");
124 }
125 }
126 printf("\n");
127 }
128#endif
129
20054ef0 130 if (!(env->tr.flags & DESC_P_MASK)) {
eaa728ee 131 cpu_abort(env, "invalid tss");
20054ef0 132 }
eaa728ee 133 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 134 if ((type & 7) != 1) {
eaa728ee 135 cpu_abort(env, "invalid tss type");
20054ef0 136 }
137 shift = type >> 3;
138 index = (dpl * 4 + 2) << shift;
20054ef0 139 if (index + (4 << shift) - 1 > env->tr.limit) {
77b2bc2c 140 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
20054ef0 141 }
142 if (shift == 0) {
143 *esp_ptr = lduw_kernel(env->tr.base + index);
144 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
145 } else {
146 *esp_ptr = ldl_kernel(env->tr.base + index);
147 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
148 }
149}
150
151/* XXX: merge with load_seg() */
152static void tss_load_seg(int seg_reg, int selector)
153{
154 uint32_t e1, e2;
155 int rpl, dpl, cpl;
156
157 if ((selector & 0xfffc) != 0) {
20054ef0 158 if (load_segment(&e1, &e2, selector) != 0) {
77b2bc2c 159 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
160 }
161 if (!(e2 & DESC_S_MASK)) {
77b2bc2c 162 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 163 }
164 rpl = selector & 3;
165 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
166 cpl = env->hflags & HF_CPL_MASK;
167 if (seg_reg == R_CS) {
20054ef0 168 if (!(e2 & DESC_CS_MASK)) {
77b2bc2c 169 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
170 }
171 /* XXX: is it correct? */
172 if (dpl != rpl) {
77b2bc2c 173 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
174 }
175 if ((e2 & DESC_C_MASK) && dpl > rpl) {
77b2bc2c 176 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 177 }
178 } else if (seg_reg == R_SS) {
179 /* SS must be writable data */
20054ef0 180 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
77b2bc2c 181 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
182 }
183 if (dpl != cpl || dpl != rpl) {
77b2bc2c 184 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 185 }
186 } else {
187 /* not readable code */
20054ef0 188 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
77b2bc2c 189 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 190 }
191 /* if data or non-conforming code, check the rights */
192 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
20054ef0 193 if (dpl < cpl || dpl < rpl) {
77b2bc2c 194 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 195 }
196 }
197 }
20054ef0 198 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 199 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 200 }
eaa728ee 201 cpu_x86_load_seg_cache(env, seg_reg, selector,
202 get_seg_base(e1, e2),
203 get_seg_limit(e1, e2),
204 e2);
eaa728ee 205 } else {
20054ef0 206 if (seg_reg == R_SS || seg_reg == R_CS) {
77b2bc2c 207 raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
20054ef0 208 }
209 }
210}
211
212#define SWITCH_TSS_JMP 0
213#define SWITCH_TSS_IRET 1
214#define SWITCH_TSS_CALL 2
215
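/* The offsets used below follow the architectural TSS layouts: a 32-bit
   TSS holds CR3 at 0x1c, EIP at 0x20, EFLAGS at 0x24, the general-purpose
   registers from 0x28, the segment selectors from 0x48 and the LDT
   selector at 0x60; the 16-bit TSS packs IP, FLAGS and the registers
   starting at 0x0e. */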
216/* XXX: restore CPU state in registers (PowerPC case) */
217static void switch_tss(int tss_selector,
218 uint32_t e1, uint32_t e2, int source,
219 uint32_t next_eip)
220{
221 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
222 target_ulong tss_base;
223 uint32_t new_regs[8], new_segs[6];
224 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
225 uint32_t old_eflags, eflags_mask;
226 SegmentCache *dt;
227 int index;
228 target_ulong ptr;
229
230 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
231 LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
232 source);
233
234 /* if task gate, we read the TSS segment and we load it */
235 if (type == 5) {
20054ef0 236 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 237 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
20054ef0 238 }
eaa728ee 239 tss_selector = e1 >> 16;
20054ef0 240 if (tss_selector & 4) {
77b2bc2c 241 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
242 }
243 if (load_segment(&e1, &e2, tss_selector) != 0) {
77b2bc2c 244 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
245 }
246 if (e2 & DESC_S_MASK) {
77b2bc2c 247 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
20054ef0 248 }
eaa728ee 249 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 250 if ((type & 7) != 1) {
77b2bc2c 251 raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
20054ef0 252 }
253 }
254
20054ef0 255 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 256 raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
20054ef0 257 }
eaa728ee 258
20054ef0 259 if (type & 8) {
eaa728ee 260 tss_limit_max = 103;
20054ef0 261 } else {
eaa728ee 262 tss_limit_max = 43;
20054ef0 263 }
264 tss_limit = get_seg_limit(e1, e2);
265 tss_base = get_seg_base(e1, e2);
266 if ((tss_selector & 4) != 0 ||
20054ef0 267 tss_limit < tss_limit_max) {
77b2bc2c 268 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 269 }
eaa728ee 270 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 271 if (old_type & 8) {
eaa728ee 272 old_tss_limit_max = 103;
20054ef0 273 } else {
eaa728ee 274 old_tss_limit_max = 43;
20054ef0 275 }
276
277 /* read all the registers from the new TSS */
278 if (type & 8) {
279 /* 32 bit */
280 new_cr3 = ldl_kernel(tss_base + 0x1c);
281 new_eip = ldl_kernel(tss_base + 0x20);
282 new_eflags = ldl_kernel(tss_base + 0x24);
20054ef0 283 for (i = 0; i < 8; i++) {
eaa728ee 284 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
285 }
286 for (i = 0; i < 6; i++) {
eaa728ee 287 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
20054ef0 288 }
289 new_ldt = lduw_kernel(tss_base + 0x60);
290 new_trap = ldl_kernel(tss_base + 0x64);
291 } else {
292 /* 16 bit */
293 new_cr3 = 0;
294 new_eip = lduw_kernel(tss_base + 0x0e);
295 new_eflags = lduw_kernel(tss_base + 0x10);
20054ef0 296 for (i = 0; i < 8; i++) {
eaa728ee 297 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
298 }
299 for (i = 0; i < 4; i++) {
eaa728ee 300 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
20054ef0 301 }
302 new_ldt = lduw_kernel(tss_base + 0x2a);
303 new_segs[R_FS] = 0;
304 new_segs[R_GS] = 0;
305 new_trap = 0;
306 }
307 /* XXX: avoid a compiler warning, see
308 http://support.amd.com/us/Processor_TechDocs/24593.pdf
309 chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
310 (void)new_trap;
311
312 /* NOTE: we must avoid memory exceptions during the task switch,
313 so we make dummy accesses before */
314 /* XXX: it can still fail in some cases, so a bigger hack is
315 necessary to validate the TLB after having done the accesses */
316
317 v1 = ldub_kernel(env->tr.base);
318 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
319 stb_kernel(env->tr.base, v1);
320 stb_kernel(env->tr.base + old_tss_limit_max, v2);
321
322 /* clear busy bit (it is restartable) */
323 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
324 target_ulong ptr;
325 uint32_t e2;
20054ef0 326
327 ptr = env->gdt.base + (env->tr.selector & ~7);
328 e2 = ldl_kernel(ptr + 4);
329 e2 &= ~DESC_TSS_BUSY_MASK;
330 stl_kernel(ptr + 4, e2);
331 }
997ff0d9 332 old_eflags = cpu_compute_eflags(env);
20054ef0 333 if (source == SWITCH_TSS_IRET) {
eaa728ee 334 old_eflags &= ~NT_MASK;
20054ef0 335 }
336
337 /* save the current state in the old TSS */
338 if (type & 8) {
339 /* 32 bit */
340 stl_kernel(env->tr.base + 0x20, next_eip);
341 stl_kernel(env->tr.base + 0x24, old_eflags);
342 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
343 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
344 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
345 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
346 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
347 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
348 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
349 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
20054ef0 350 for (i = 0; i < 6; i++) {
eaa728ee 351 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
20054ef0 352 }
353 } else {
354 /* 16 bit */
355 stw_kernel(env->tr.base + 0x0e, next_eip);
356 stw_kernel(env->tr.base + 0x10, old_eflags);
357 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
358 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
359 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
360 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
361 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
362 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
363 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
364 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
20054ef0 365 for (i = 0; i < 4; i++) {
eaa728ee 366 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
20054ef0 367 }
368 }
369
370 /* now if an exception occurs, it will occur in the next task
371 context */
372
373 if (source == SWITCH_TSS_CALL) {
374 stw_kernel(tss_base, env->tr.selector);
375 new_eflags |= NT_MASK;
376 }
377
378 /* set busy bit */
379 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
380 target_ulong ptr;
381 uint32_t e2;
20054ef0 382
383 ptr = env->gdt.base + (tss_selector & ~7);
384 e2 = ldl_kernel(ptr + 4);
385 e2 |= DESC_TSS_BUSY_MASK;
386 stl_kernel(ptr + 4, e2);
387 }
388
389 /* set the new CPU state */
390 /* from this point, any exception which occurs can give problems */
391 env->cr[0] |= CR0_TS_MASK;
392 env->hflags |= HF_TS_MASK;
393 env->tr.selector = tss_selector;
394 env->tr.base = tss_base;
395 env->tr.limit = tss_limit;
396 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
397
398 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
399 cpu_x86_update_cr3(env, new_cr3);
400 }
401
402 /* load all registers without an exception, then reload them with
403 possible exception */
404 env->eip = new_eip;
405 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
406 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
20054ef0 407 if (!(type & 8)) {
eaa728ee 408 eflags_mask &= 0xffff;
20054ef0 409 }
997ff0d9 410 cpu_load_eflags(env, new_eflags, eflags_mask);
20054ef0 411 /* XXX: what to do in 16 bit case? */
412 EAX = new_regs[0];
413 ECX = new_regs[1];
414 EDX = new_regs[2];
415 EBX = new_regs[3];
416 ESP = new_regs[4];
417 EBP = new_regs[5];
418 ESI = new_regs[6];
419 EDI = new_regs[7];
420 if (new_eflags & VM_MASK) {
20054ef0 421 for (i = 0; i < 6; i++) {
eaa728ee 422 load_seg_vm(i, new_segs[i]);
20054ef0 423 }
424 /* in vm86, CPL is always 3 */
425 cpu_x86_set_cpl(env, 3);
426 } else {
427 /* CPL is set to the RPL of CS */
428 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
429 /* first just selectors as the rest may trigger exceptions */
20054ef0 430 for (i = 0; i < 6; i++) {
eaa728ee 431 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
20054ef0 432 }
433 }
434
435 env->ldt.selector = new_ldt & ~4;
436 env->ldt.base = 0;
437 env->ldt.limit = 0;
438 env->ldt.flags = 0;
439
440 /* load the LDT */
20054ef0 441 if (new_ldt & 4) {
77b2bc2c 442 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
20054ef0 443 }
444
445 if ((new_ldt & 0xfffc) != 0) {
446 dt = &env->gdt;
447 index = new_ldt & ~7;
20054ef0 448 if ((index + 7) > dt->limit) {
77b2bc2c 449 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
20054ef0 450 }
451 ptr = dt->base + index;
452 e1 = ldl_kernel(ptr);
453 e2 = ldl_kernel(ptr + 4);
20054ef0 454 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
77b2bc2c 455 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
456 }
457 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 458 raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
20054ef0 459 }
460 load_seg_cache_raw_dt(&env->ldt, e1, e2);
461 }
462
463 /* load the segments */
464 if (!(new_eflags & VM_MASK)) {
465 tss_load_seg(R_CS, new_segs[R_CS]);
466 tss_load_seg(R_SS, new_segs[R_SS]);
467 tss_load_seg(R_ES, new_segs[R_ES]);
468 tss_load_seg(R_DS, new_segs[R_DS]);
469 tss_load_seg(R_FS, new_segs[R_FS]);
470 tss_load_seg(R_GS, new_segs[R_GS]);
471 }
472
473 /* check that EIP is in the CS segment limits */
474 if (new_eip > env->segs[R_CS].limit) {
20054ef0 475 /* XXX: different exception if CALL? */
77b2bc2c 476 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee 477 }
478
479#ifndef CONFIG_USER_ONLY
480 /* reset local breakpoints */
481 if (env->dr[7] & 0x55) {
482 for (i = 0; i < 4; i++) {
20054ef0 483 if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
01df040b 484 hw_breakpoint_remove(env, i);
20054ef0 485 }
486 }
487 env->dr[7] &= ~0x55;
488 }
489#endif
490}
491
492/* check if Port I/O is allowed in TSS */
493static inline void check_io(int addr, int size)
494{
495 int io_offset, val, mask;
496
497 /* TSS must be a valid 32 bit one */
498 if (!(env->tr.flags & DESC_P_MASK) ||
499 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
20054ef0 500 env->tr.limit < 103) {
eaa728ee 501 goto fail;
20054ef0 502 }
503 io_offset = lduw_kernel(env->tr.base + 0x66);
504 io_offset += (addr >> 3);
505 /* Note: the check needs two bytes */
20054ef0 506 if ((io_offset + 1) > env->tr.limit) {
eaa728ee 507 goto fail;
20054ef0 508 }
509 val = lduw_kernel(env->tr.base + io_offset);
510 val >>= (addr & 7);
511 mask = (1 << size) - 1;
512 /* all bits must be zero to allow the I/O */
513 if ((val & mask) != 0) {
514 fail:
77b2bc2c 515 raise_exception_err(env, EXCP0D_GPF, 0);
516 }
517}
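/* The check above implements the TSS I/O permission bitmap: the 16-bit
   word at TSS offset 0x66 gives the bitmap base, each I/O port maps to
   one bit, and two bytes are always read so that an access spanning a
   byte boundary is still covered. */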
518
519void helper_check_iob(uint32_t t0)
520{
521 check_io(t0, 1);
522}
523
524void helper_check_iow(uint32_t t0)
525{
526 check_io(t0, 2);
527}
528
529void helper_check_iol(uint32_t t0)
530{
531 check_io(t0, 4);
532}
533
534void helper_outb(uint32_t port, uint32_t data)
535{
afcea8cb 536 cpu_outb(port, data & 0xff);
537}
538
539target_ulong helper_inb(uint32_t port)
540{
afcea8cb 541 return cpu_inb(port);
542}
543
544void helper_outw(uint32_t port, uint32_t data)
545{
afcea8cb 546 cpu_outw(port, data & 0xffff);
547}
548
549target_ulong helper_inw(uint32_t port)
550{
afcea8cb 551 return cpu_inw(port);
552}
553
554void helper_outl(uint32_t port, uint32_t data)
555{
afcea8cb 556 cpu_outl(port, data);
557}
558
559target_ulong helper_inl(uint32_t port)
560{
afcea8cb 561 return cpu_inl(port);
562}
563
564static inline unsigned int get_sp_mask(unsigned int e2)
565{
20054ef0 566 if (e2 & DESC_B_MASK) {
eaa728ee 567 return 0xffffffff;
20054ef0 568 } else {
eaa728ee 569 return 0xffff;
20054ef0 570 }
571}
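/* The B (big) bit of the stack-segment descriptor selects a 32-bit ESP
   versus a 16-bit SP stack pointer, hence the 0xffffffff / 0xffff mask. */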
572
20054ef0 573static int exception_has_error_code(int intno)
2ed51f5b 574{
575 switch (intno) {
576 case 8:
577 case 10:
578 case 11:
579 case 12:
580 case 13:
581 case 14:
582 case 17:
583 return 1;
584 }
585 return 0;
586}
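/* Vectors 8 (#DF), 10 (#TS), 11 (#NP), 12 (#SS), 13 (#GP), 14 (#PF) and
   17 (#AC) push an error code; other exceptions and external interrupts
   do not. */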
587
eaa728ee 588#ifdef TARGET_X86_64
589#define SET_ESP(val, sp_mask) \
590 do { \
591 if ((sp_mask) == 0xffff) { \
592 ESP = (ESP & ~0xffff) | ((val) & 0xffff); \
593 } else if ((sp_mask) == 0xffffffffLL) { \
594 ESP = (uint32_t)(val); \
595 } else { \
596 ESP = (val); \
597 } \
598 } while (0)
eaa728ee 599#else
600#define SET_ESP(val, sp_mask) \
601 do { \
602 ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
603 } while (0)
604#endif
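/* On TARGET_X86_64 the three cases are needed because a 16-bit stack only
   updates the low word of RSP, a 32-bit stack zero-extends into RSP, and
   a full 64-bit stack must store the value unmasked. */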
605
606/* in 64-bit machines, this can overflow. So this segment addition macro
607 * can be used to trim the value to 32-bit whenever needed */
608#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
609
eaa728ee 610/* XXX: add a is_user flag to have proper security support */
611#define PUSHW(ssp, sp, sp_mask, val) \
612 { \
613 sp -= 2; \
614 stw_kernel((ssp) + (sp & (sp_mask)), (val)); \
615 }
eaa728ee 616
617#define PUSHL(ssp, sp, sp_mask, val) \
618 { \
619 sp -= 4; \
620 stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
621 }
eaa728ee 622
623#define POPW(ssp, sp, sp_mask, val) \
624 { \
625 val = lduw_kernel((ssp) + (sp & (sp_mask))); \
626 sp += 2; \
627 }
eaa728ee 628
629#define POPL(ssp, sp, sp_mask, val) \
630 { \
631 val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \
632 sp += 4; \
633 }
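/* The PUSH/POP macros work on a local copy of the stack pointer; callers
   commit it back to ESP (e.g. via SET_ESP()) only after the memory
   accesses have succeeded, so a fault in the middle leaves the
   architectural ESP unchanged. */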
634
635/* protected mode interrupt */
636static void do_interrupt_protected(int intno, int is_int, int error_code,
637 unsigned int next_eip, int is_hw)
638{
639 SegmentCache *dt;
640 target_ulong ptr, ssp;
641 int type, dpl, selector, ss_dpl, cpl;
642 int has_error_code, new_stack, shift;
1c918eba 643 uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
eaa728ee 644 uint32_t old_eip, sp_mask;
eaa728ee 645
eaa728ee 646 has_error_code = 0;
647 if (!is_int && !is_hw) {
648 has_error_code = exception_has_error_code(intno);
649 }
650 if (is_int) {
eaa728ee 651 old_eip = next_eip;
20054ef0 652 } else {
eaa728ee 653 old_eip = env->eip;
20054ef0 654 }
655
656 dt = &env->idt;
20054ef0 657 if (intno * 8 + 7 > dt->limit) {
77b2bc2c 658 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 659 }
660 ptr = dt->base + intno * 8;
661 e1 = ldl_kernel(ptr);
662 e2 = ldl_kernel(ptr + 4);
663 /* check gate type */
664 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
20054ef0 665 switch (type) {
666 case 5: /* task gate */
667 /* must do that check here to return the correct error code */
20054ef0 668 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 669 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
20054ef0 670 }
671 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
672 if (has_error_code) {
673 int type;
674 uint32_t mask;
20054ef0 675
676 /* push the error code */
677 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
678 shift = type >> 3;
20054ef0 679 if (env->segs[R_SS].flags & DESC_B_MASK) {
eaa728ee 680 mask = 0xffffffff;
20054ef0 681 } else {
eaa728ee 682 mask = 0xffff;
20054ef0 683 }
684 esp = (ESP - (2 << shift)) & mask;
685 ssp = env->segs[R_SS].base + esp;
20054ef0 686 if (shift) {
eaa728ee 687 stl_kernel(ssp, error_code);
20054ef0 688 } else {
eaa728ee 689 stw_kernel(ssp, error_code);
20054ef0 690 }
691 SET_ESP(esp, mask);
692 }
693 return;
694 case 6: /* 286 interrupt gate */
695 case 7: /* 286 trap gate */
696 case 14: /* 386 interrupt gate */
697 case 15: /* 386 trap gate */
698 break;
699 default:
77b2bc2c 700 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
701 break;
702 }
703 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
704 cpl = env->hflags & HF_CPL_MASK;
1235fc06 705 /* check privilege if software int */
20054ef0 706 if (is_int && dpl < cpl) {
77b2bc2c 707 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 708 }
eaa728ee 709 /* check valid bit */
20054ef0 710 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 711 raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
20054ef0 712 }
713 selector = e1 >> 16;
714 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
20054ef0 715 if ((selector & 0xfffc) == 0) {
77b2bc2c 716 raise_exception_err(env, EXCP0D_GPF, 0);
717 }
718 if (load_segment(&e1, &e2, selector) != 0) {
77b2bc2c 719 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
720 }
721 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 722 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 723 }
eaa728ee 724 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 725 if (dpl > cpl) {
77b2bc2c 726 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
727 }
728 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 729 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 730 }
731 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
732 /* to inner privilege */
733 get_ss_esp_from_tss(&ss, &esp, dpl);
20054ef0 734 if ((ss & 0xfffc) == 0) {
77b2bc2c 735 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
736 }
737 if ((ss & 3) != dpl) {
77b2bc2c 738 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
739 }
740 if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
77b2bc2c 741 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 742 }
eaa728ee 743 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 744 if (ss_dpl != dpl) {
77b2bc2c 745 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 746 }
747 if (!(ss_e2 & DESC_S_MASK) ||
748 (ss_e2 & DESC_CS_MASK) ||
20054ef0 749 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 750 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
751 }
752 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 753 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 754 }
755 new_stack = 1;
756 sp_mask = get_sp_mask(ss_e2);
757 ssp = get_seg_base(ss_e1, ss_e2);
758 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
759 /* to same privilege */
20054ef0 760 if (env->eflags & VM_MASK) {
77b2bc2c 761 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 762 }
763 new_stack = 0;
764 sp_mask = get_sp_mask(env->segs[R_SS].flags);
765 ssp = env->segs[R_SS].base;
766 esp = ESP;
767 dpl = cpl;
768 } else {
77b2bc2c 769 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
770 new_stack = 0; /* avoid warning */
771 sp_mask = 0; /* avoid warning */
772 ssp = 0; /* avoid warning */
773 esp = 0; /* avoid warning */
774 }
775
776 shift = type >> 3;
777
778#if 0
779 /* XXX: check that enough room is available */
780 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
20054ef0 781 if (env->eflags & VM_MASK) {
eaa728ee 782 push_size += 8;
20054ef0 783 }
784 push_size <<= shift;
785#endif
786 if (shift == 1) {
787 if (new_stack) {
788 if (env->eflags & VM_MASK) {
789 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
790 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
791 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
792 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
793 }
794 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
795 PUSHL(ssp, esp, sp_mask, ESP);
796 }
997ff0d9 797 PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
798 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
799 PUSHL(ssp, esp, sp_mask, old_eip);
800 if (has_error_code) {
801 PUSHL(ssp, esp, sp_mask, error_code);
802 }
803 } else {
804 if (new_stack) {
805 if (env->eflags & VM_MASK) {
806 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
807 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
808 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
809 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
810 }
811 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
812 PUSHW(ssp, esp, sp_mask, ESP);
813 }
997ff0d9 814 PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
815 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
816 PUSHW(ssp, esp, sp_mask, old_eip);
817 if (has_error_code) {
818 PUSHW(ssp, esp, sp_mask, error_code);
819 }
820 }
821
822 if (new_stack) {
823 if (env->eflags & VM_MASK) {
824 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
825 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
826 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
827 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
828 }
829 ss = (ss & ~3) | dpl;
830 cpu_x86_load_seg_cache(env, R_SS, ss,
831 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
832 }
833 SET_ESP(esp, sp_mask);
834
835 selector = (selector & ~3) | dpl;
836 cpu_x86_load_seg_cache(env, R_CS, selector,
837 get_seg_base(e1, e2),
838 get_seg_limit(e1, e2),
839 e2);
840 cpu_x86_set_cpl(env, dpl);
841 env->eip = offset;
842
843 /* interrupt gates clear the IF mask */
844 if ((type & 1) == 0) {
845 env->eflags &= ~IF_MASK;
846 }
847 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
848}
849
850#ifdef TARGET_X86_64
851
852#define PUSHQ(sp, val) \
853 { \
854 sp -= 8; \
855 stq_kernel(sp, (val)); \
856 }
eaa728ee 857
858#define POPQ(sp, val) \
859 { \
860 val = ldq_kernel(sp); \
861 sp += 8; \
862 }
863
864static inline target_ulong get_rsp_from_tss(int level)
865{
866 int index;
867
868#if 0
869 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
870 env->tr.base, env->tr.limit);
871#endif
872
20054ef0 873 if (!(env->tr.flags & DESC_P_MASK)) {
eaa728ee 874 cpu_abort(env, "invalid tss");
20054ef0 875 }
eaa728ee 876 index = 8 * level + 4;
20054ef0 877 if ((index + 7) > env->tr.limit) {
77b2bc2c 878 raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
20054ef0 879 }
880 return ldq_kernel(env->tr.base + index);
881}
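/* In the 64-bit TSS, RSP0-RSP2 start at offset 4 and IST1-IST7 at offset
   0x24; callers pass either dpl (0-2) or ist + 3, which both map onto the
   same 8 * level + 4 indexing. */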
882
883/* 64 bit interrupt */
884static void do_interrupt64(int intno, int is_int, int error_code,
885 target_ulong next_eip, int is_hw)
886{
887 SegmentCache *dt;
888 target_ulong ptr;
889 int type, dpl, selector, cpl, ist;
890 int has_error_code, new_stack;
891 uint32_t e1, e2, e3, ss;
892 target_ulong old_eip, esp, offset;
eaa728ee 893
eaa728ee 894 has_error_code = 0;
895 if (!is_int && !is_hw) {
896 has_error_code = exception_has_error_code(intno);
897 }
898 if (is_int) {
eaa728ee 899 old_eip = next_eip;
20054ef0 900 } else {
eaa728ee 901 old_eip = env->eip;
20054ef0 902 }
903
904 dt = &env->idt;
20054ef0 905 if (intno * 16 + 15 > dt->limit) {
77b2bc2c 906 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
20054ef0 907 }
908 ptr = dt->base + intno * 16;
909 e1 = ldl_kernel(ptr);
910 e2 = ldl_kernel(ptr + 4);
911 e3 = ldl_kernel(ptr + 8);
912 /* check gate type */
913 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
20054ef0 914 switch (type) {
915 case 14: /* 386 interrupt gate */
916 case 15: /* 386 trap gate */
917 break;
918 default:
77b2bc2c 919 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
920 break;
921 }
922 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
923 cpl = env->hflags & HF_CPL_MASK;
1235fc06 924 /* check privilege if software int */
20054ef0 925 if (is_int && dpl < cpl) {
77b2bc2c 926 raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
20054ef0 927 }
eaa728ee 928 /* check valid bit */
20054ef0 929 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 930 raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
20054ef0 931 }
932 selector = e1 >> 16;
933 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
934 ist = e2 & 7;
20054ef0 935 if ((selector & 0xfffc) == 0) {
77b2bc2c 936 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 937 }
eaa728ee 938
20054ef0 939 if (load_segment(&e1, &e2, selector) != 0) {
77b2bc2c 940 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
941 }
942 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 943 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 944 }
eaa728ee 945 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 946 if (dpl > cpl) {
77b2bc2c 947 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
948 }
949 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 950 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
951 }
952 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
77b2bc2c 953 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 954 }
955 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
956 /* to inner privilege */
20054ef0 957 if (ist != 0) {
eaa728ee 958 esp = get_rsp_from_tss(ist + 3);
20054ef0 959 } else {
eaa728ee 960 esp = get_rsp_from_tss(dpl);
20054ef0 961 }
962 esp &= ~0xfLL; /* align stack */
963 ss = 0;
964 new_stack = 1;
965 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
966 /* to same privilege */
20054ef0 967 if (env->eflags & VM_MASK) {
77b2bc2c 968 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 969 }
eaa728ee 970 new_stack = 0;
20054ef0 971 if (ist != 0) {
eaa728ee 972 esp = get_rsp_from_tss(ist + 3);
20054ef0 973 } else {
eaa728ee 974 esp = ESP;
20054ef0 975 }
976 esp &= ~0xfLL; /* align stack */
977 dpl = cpl;
978 } else {
77b2bc2c 979 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
980 new_stack = 0; /* avoid warning */
981 esp = 0; /* avoid warning */
982 }
983
984 PUSHQ(esp, env->segs[R_SS].selector);
985 PUSHQ(esp, ESP);
997ff0d9 986 PUSHQ(esp, cpu_compute_eflags(env));
987 PUSHQ(esp, env->segs[R_CS].selector);
988 PUSHQ(esp, old_eip);
989 if (has_error_code) {
990 PUSHQ(esp, error_code);
991 }
992
993 if (new_stack) {
994 ss = 0 | dpl;
995 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
996 }
997 ESP = esp;
998
999 selector = (selector & ~3) | dpl;
1000 cpu_x86_load_seg_cache(env, R_CS, selector,
1001 get_seg_base(e1, e2),
1002 get_seg_limit(e1, e2),
1003 e2);
1004 cpu_x86_set_cpl(env, dpl);
1005 env->eip = offset;
1006
1007 /* interrupt gates clear the IF mask */
1008 if ((type & 1) == 0) {
1009 env->eflags &= ~IF_MASK;
1010 }
1011 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
1012}
1013#endif
1014
d9957a8b 1015#ifdef TARGET_X86_64
1016#if defined(CONFIG_USER_ONLY)
1017void helper_syscall(int next_eip_addend)
1018{
1019 env->exception_index = EXCP_SYSCALL;
1020 env->exception_next_eip = env->eip + next_eip_addend;
1162c041 1021 cpu_loop_exit(env);
1022}
1023#else
1024void helper_syscall(int next_eip_addend)
1025{
1026 int selector;
1027
1028 if (!(env->efer & MSR_EFER_SCE)) {
77b2bc2c 1029 raise_exception_err(env, EXCP06_ILLOP, 0);
1030 }
1031 selector = (env->star >> 32) & 0xffff;
1032 if (env->hflags & HF_LMA_MASK) {
1033 int code64;
1034
1035 ECX = env->eip + next_eip_addend;
997ff0d9 1036 env->regs[11] = cpu_compute_eflags(env);
1037
1038 code64 = env->hflags & HF_CS64_MASK;
1039
1040 cpu_x86_set_cpl(env, 0);
1041 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1042 0, 0xffffffff,
1043 DESC_G_MASK | DESC_P_MASK |
1044 DESC_S_MASK |
1045 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1046 DESC_L_MASK);
1047 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1048 0, 0xffffffff,
1049 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1050 DESC_S_MASK |
1051 DESC_W_MASK | DESC_A_MASK);
1052 env->eflags &= ~env->fmask;
997ff0d9 1053 cpu_load_eflags(env, env->eflags, 0);
20054ef0 1054 if (code64) {
eaa728ee 1055 env->eip = env->lstar;
20054ef0 1056 } else {
eaa728ee 1057 env->eip = env->cstar;
20054ef0 1058 }
d9957a8b 1059 } else {
1060 ECX = (uint32_t)(env->eip + next_eip_addend);
1061
1062 cpu_x86_set_cpl(env, 0);
1063 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1064 0, 0xffffffff,
1065 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1066 DESC_S_MASK |
1067 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1068 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1069 0, 0xffffffff,
1070 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1071 DESC_S_MASK |
1072 DESC_W_MASK | DESC_A_MASK);
1073 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1074 env->eip = (uint32_t)env->star;
1075 }
1076}
1077#endif
d9957a8b 1078#endif
eaa728ee 1079
d9957a8b 1080#ifdef TARGET_X86_64
1081void helper_sysret(int dflag)
1082{
1083 int cpl, selector;
1084
1085 if (!(env->efer & MSR_EFER_SCE)) {
77b2bc2c 1086 raise_exception_err(env, EXCP06_ILLOP, 0);
1087 }
1088 cpl = env->hflags & HF_CPL_MASK;
1089 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
77b2bc2c 1090 raise_exception_err(env, EXCP0D_GPF, 0);
1091 }
1092 selector = (env->star >> 48) & 0xffff;
1093 if (env->hflags & HF_LMA_MASK) {
1094 if (dflag == 2) {
1095 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1096 0, 0xffffffff,
1097 DESC_G_MASK | DESC_P_MASK |
1098 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1099 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1100 DESC_L_MASK);
1101 env->eip = ECX;
1102 } else {
1103 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1104 0, 0xffffffff,
1105 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1106 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1107 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1108 env->eip = (uint32_t)ECX;
1109 }
1110 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1111 0, 0xffffffff,
1112 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1113 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1114 DESC_W_MASK | DESC_A_MASK);
1115 cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
1116 | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
1117 NT_MASK);
eaa728ee 1118 cpu_x86_set_cpl(env, 3);
d9957a8b 1119 } else {
1120 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1121 0, 0xffffffff,
1122 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1123 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1124 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1125 env->eip = (uint32_t)ECX;
1126 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1127 0, 0xffffffff,
1128 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1129 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1130 DESC_W_MASK | DESC_A_MASK);
1131 env->eflags |= IF_MASK;
1132 cpu_x86_set_cpl(env, 3);
1133 }
eaa728ee 1134}
d9957a8b 1135#endif
1136
1137/* real mode interrupt */
1138static void do_interrupt_real(int intno, int is_int, int error_code,
1139 unsigned int next_eip)
1140{
1141 SegmentCache *dt;
1142 target_ulong ptr, ssp;
1143 int selector;
1144 uint32_t offset, esp;
1145 uint32_t old_cs, old_eip;
eaa728ee 1146
20054ef0 1147 /* real mode (simpler!) */
eaa728ee 1148 dt = &env->idt;
20054ef0 1149 if (intno * 4 + 3 > dt->limit) {
77b2bc2c 1150 raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
20054ef0 1151 }
1152 ptr = dt->base + intno * 4;
1153 offset = lduw_kernel(ptr);
1154 selector = lduw_kernel(ptr + 2);
1155 esp = ESP;
1156 ssp = env->segs[R_SS].base;
20054ef0 1157 if (is_int) {
eaa728ee 1158 old_eip = next_eip;
20054ef0 1159 } else {
eaa728ee 1160 old_eip = env->eip;
20054ef0 1161 }
eaa728ee 1162 old_cs = env->segs[R_CS].selector;
20054ef0 1163 /* XXX: use SS segment size? */
997ff0d9 1164 PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
1165 PUSHW(ssp, esp, 0xffff, old_cs);
1166 PUSHW(ssp, esp, 0xffff, old_eip);
1167
1168 /* update processor state */
1169 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1170 env->eip = offset;
1171 env->segs[R_CS].selector = selector;
1172 env->segs[R_CS].base = (selector << 4);
1173 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1174}
1175
e694d4e2 1176#if defined(CONFIG_USER_ONLY)
eaa728ee 1177/* fake user mode interrupt */
1178static void do_interrupt_user(int intno, int is_int, int error_code,
1179 target_ulong next_eip)
1180{
1181 SegmentCache *dt;
1182 target_ulong ptr;
1183 int dpl, cpl, shift;
1184 uint32_t e2;
1185
1186 dt = &env->idt;
1187 if (env->hflags & HF_LMA_MASK) {
1188 shift = 4;
1189 } else {
1190 shift = 3;
1191 }
1192 ptr = dt->base + (intno << shift);
1193 e2 = ldl_kernel(ptr + 4);
1194
1195 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1196 cpl = env->hflags & HF_CPL_MASK;
1235fc06 1197 /* check privilege if software int */
20054ef0 1198 if (is_int && dpl < cpl) {
77b2bc2c 1199 raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
20054ef0 1200 }
1201
1202 /* Since we emulate only user space, we cannot do more than
1203 exit the emulation with the suitable exception and error
1204 code */
20054ef0 1205 if (is_int) {
eaa728ee 1206 EIP = next_eip;
20054ef0 1207 }
1208}
1209
1210#else
1211
2ed51f5b 1212static void handle_even_inj(int intno, int is_int, int error_code,
20054ef0 1213 int is_hw, int rm)
2ed51f5b 1214{
1215 uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
1216 control.event_inj));
1217
2ed51f5b 1218 if (!(event_inj & SVM_EVTINJ_VALID)) {
1219 int type;
1220
1221 if (is_int) {
1222 type = SVM_EVTINJ_TYPE_SOFT;
1223 } else {
1224 type = SVM_EVTINJ_TYPE_EXEPT;
1225 }
1226 event_inj = intno | type | SVM_EVTINJ_VALID;
1227 if (!rm && exception_has_error_code(intno)) {
1228 event_inj |= SVM_EVTINJ_VALID_ERR;
1229 stl_phys(env->vm_vmcb + offsetof(struct vmcb,
1230 control.event_inj_err),
1231 error_code);
1232 }
1233 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1234 event_inj);
1235 }
1236}
00ea18d1 1237#endif
2ed51f5b 1238
1239/*
1240 * Begin execution of an interruption. is_int is TRUE if coming from
1241 * the int instruction. next_eip is the EIP value AFTER the interrupt
1242 * instruction. It is only relevant if is_int is TRUE.
1243 */
1244static void do_interrupt_all(int intno, int is_int, int error_code,
1245 target_ulong next_eip, int is_hw)
eaa728ee 1246{
8fec2b8c 1247 if (qemu_loglevel_mask(CPU_LOG_INT)) {
1248 if ((env->cr[0] & CR0_PE_MASK)) {
1249 static int count;
1250
1251 qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
1252 " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1253 count, intno, error_code, is_int,
1254 env->hflags & HF_CPL_MASK,
1255 env->segs[R_CS].selector, EIP,
1256 (int)env->segs[R_CS].base + EIP,
1257 env->segs[R_SS].selector, ESP);
eaa728ee 1258 if (intno == 0x0e) {
93fcfe39 1259 qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
eaa728ee 1260 } else {
93fcfe39 1261 qemu_log(" EAX=" TARGET_FMT_lx, EAX);
eaa728ee 1262 }
1263 qemu_log("\n");
1264 log_cpu_state(env, X86_DUMP_CCOP);
1265#if 0
1266 {
1267 int i;
9bd5494e 1268 target_ulong ptr;
20054ef0 1269
93fcfe39 1270 qemu_log(" code=");
eaa728ee 1271 ptr = env->segs[R_CS].base + env->eip;
20054ef0 1272 for (i = 0; i < 16; i++) {
93fcfe39 1273 qemu_log(" %02x", ldub(ptr + i));
eaa728ee 1274 }
93fcfe39 1275 qemu_log("\n");
1276 }
1277#endif
1278 count++;
1279 }
1280 }
1281 if (env->cr[0] & CR0_PE_MASK) {
00ea18d1 1282#if !defined(CONFIG_USER_ONLY)
20054ef0 1283 if (env->hflags & HF_SVMI_MASK) {
2ed51f5b 1284 handle_even_inj(intno, is_int, error_code, is_hw, 0);
20054ef0 1285 }
00ea18d1 1286#endif
eb38c52c 1287#ifdef TARGET_X86_64
1288 if (env->hflags & HF_LMA_MASK) {
1289 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1290 } else
1291#endif
1292 {
1293 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1294 }
1295 } else {
00ea18d1 1296#if !defined(CONFIG_USER_ONLY)
20054ef0 1297 if (env->hflags & HF_SVMI_MASK) {
2ed51f5b 1298 handle_even_inj(intno, is_int, error_code, is_hw, 1);
20054ef0 1299 }
00ea18d1 1300#endif
1301 do_interrupt_real(intno, is_int, error_code, next_eip);
1302 }
2ed51f5b 1303
00ea18d1 1304#if !defined(CONFIG_USER_ONLY)
2ed51f5b 1305 if (env->hflags & HF_SVMI_MASK) {
1306 uint32_t event_inj = ldl_phys(env->vm_vmcb +
1307 offsetof(struct vmcb,
1308 control.event_inj));
1309
1310 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
1311 event_inj & ~SVM_EVTINJ_VALID);
2ed51f5b 1312 }
00ea18d1 1313#endif
1314}
1315
317ac620 1316void do_interrupt(CPUX86State *env1)
e694d4e2 1317{
317ac620 1318 CPUX86State *saved_env;
1319
1320 saved_env = env;
1321 env = env1;
1322#if defined(CONFIG_USER_ONLY)
1323 /* if user mode only, we simulate a fake exception
1324 which will be handled outside the cpu execution
1325 loop */
1326 do_interrupt_user(env->exception_index,
1327 env->exception_is_int,
1328 env->error_code,
1329 env->exception_next_eip);
1330 /* successfully delivered */
1331 env->old_exception = -1;
1332#else
1333 /* simulate a real cpu exception. On i386, it can
1334 trigger new exceptions, but we do not handle
1335 double or triple faults yet. */
1336 do_interrupt_all(env->exception_index,
1337 env->exception_is_int,
1338 env->error_code,
1339 env->exception_next_eip, 0);
1340 /* successfully delivered */
1341 env->old_exception = -1;
1342#endif
1343 env = saved_env;
1344}
1345
317ac620 1346void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
e694d4e2 1347{
317ac620 1348 CPUX86State *saved_env;
1349
1350 saved_env = env;
1351 env = env1;
1352 do_interrupt_all(intno, 0, 0, 0, is_hw);
1353 env = saved_env;
1354}
1355
1356/* SMM support */
1357
1358#if defined(CONFIG_USER_ONLY)
1359
317ac620 1360void do_smm_enter(CPUX86State *env1)
1361{
1362}
1363
1364void helper_rsm(void)
1365{
1366}
1367
1368#else
1369
1370#ifdef TARGET_X86_64
1371#define SMM_REVISION_ID 0x00020064
1372#else
1373#define SMM_REVISION_ID 0x00020000
1374#endif
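/* Bit 17 of the SMM revision ID advertises SMBASE relocation; helper_rsm()
   below only reloads env->smbase when the saved revision word has 0x20000
   set, and both SMM_REVISION_ID values above define that bit. */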
1375
317ac620 1376void do_smm_enter(CPUX86State *env1)
1377{
1378 target_ulong sm_state;
1379 SegmentCache *dt;
1380 int i, offset;
317ac620 1381 CPUX86State *saved_env;
1382
1383 saved_env = env;
1384 env = env1;
eaa728ee 1385
1386 qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
1387 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1388
1389 env->hflags |= HF_SMM_MASK;
1390 cpu_smm_update(env);
1391
1392 sm_state = env->smbase + 0x8000;
1393
1394#ifdef TARGET_X86_64
20054ef0 1395 for (i = 0; i < 6; i++) {
1396 dt = &env->segs[i];
1397 offset = 0x7e00 + i * 16;
1398 stw_phys(sm_state + offset, dt->selector);
1399 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1400 stl_phys(sm_state + offset + 4, dt->limit);
1401 stq_phys(sm_state + offset + 8, dt->base);
1402 }
1403
1404 stq_phys(sm_state + 0x7e68, env->gdt.base);
1405 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1406
1407 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1408 stq_phys(sm_state + 0x7e78, env->ldt.base);
1409 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1410 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1411
1412 stq_phys(sm_state + 0x7e88, env->idt.base);
1413 stl_phys(sm_state + 0x7e84, env->idt.limit);
1414
1415 stw_phys(sm_state + 0x7e90, env->tr.selector);
1416 stq_phys(sm_state + 0x7e98, env->tr.base);
1417 stl_phys(sm_state + 0x7e94, env->tr.limit);
1418 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1419
1420 stq_phys(sm_state + 0x7ed0, env->efer);
1421
1422 stq_phys(sm_state + 0x7ff8, EAX);
1423 stq_phys(sm_state + 0x7ff0, ECX);
1424 stq_phys(sm_state + 0x7fe8, EDX);
1425 stq_phys(sm_state + 0x7fe0, EBX);
1426 stq_phys(sm_state + 0x7fd8, ESP);
1427 stq_phys(sm_state + 0x7fd0, EBP);
1428 stq_phys(sm_state + 0x7fc8, ESI);
1429 stq_phys(sm_state + 0x7fc0, EDI);
20054ef0 1430 for (i = 8; i < 16; i++) {
eaa728ee 1431 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
20054ef0 1432 }
eaa728ee 1433 stq_phys(sm_state + 0x7f78, env->eip);
997ff0d9 1434 stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
1435 stl_phys(sm_state + 0x7f68, env->dr[6]);
1436 stl_phys(sm_state + 0x7f60, env->dr[7]);
1437
1438 stl_phys(sm_state + 0x7f48, env->cr[4]);
1439 stl_phys(sm_state + 0x7f50, env->cr[3]);
1440 stl_phys(sm_state + 0x7f58, env->cr[0]);
1441
1442 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1443 stl_phys(sm_state + 0x7f00, env->smbase);
1444#else
1445 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1446 stl_phys(sm_state + 0x7ff8, env->cr[3]);
997ff0d9 1447 stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
1448 stl_phys(sm_state + 0x7ff0, env->eip);
1449 stl_phys(sm_state + 0x7fec, EDI);
1450 stl_phys(sm_state + 0x7fe8, ESI);
1451 stl_phys(sm_state + 0x7fe4, EBP);
1452 stl_phys(sm_state + 0x7fe0, ESP);
1453 stl_phys(sm_state + 0x7fdc, EBX);
1454 stl_phys(sm_state + 0x7fd8, EDX);
1455 stl_phys(sm_state + 0x7fd4, ECX);
1456 stl_phys(sm_state + 0x7fd0, EAX);
1457 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1458 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1459
1460 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1461 stl_phys(sm_state + 0x7f64, env->tr.base);
1462 stl_phys(sm_state + 0x7f60, env->tr.limit);
1463 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1464
1465 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1466 stl_phys(sm_state + 0x7f80, env->ldt.base);
1467 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1468 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1469
1470 stl_phys(sm_state + 0x7f74, env->gdt.base);
1471 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1472
1473 stl_phys(sm_state + 0x7f58, env->idt.base);
1474 stl_phys(sm_state + 0x7f54, env->idt.limit);
1475
20054ef0 1476 for (i = 0; i < 6; i++) {
eaa728ee 1477 dt = &env->segs[i];
20054ef0 1478 if (i < 3) {
eaa728ee 1479 offset = 0x7f84 + i * 12;
20054ef0 1480 } else {
eaa728ee 1481 offset = 0x7f2c + (i - 3) * 12;
20054ef0 1482 }
1483 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1484 stl_phys(sm_state + offset + 8, dt->base);
1485 stl_phys(sm_state + offset + 4, dt->limit);
1486 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1487 }
1488 stl_phys(sm_state + 0x7f14, env->cr[4]);
1489
1490 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1491 stl_phys(sm_state + 0x7ef8, env->smbase);
1492#endif
1493 /* init SMM cpu state */
1494
1495#ifdef TARGET_X86_64
5efc27bb 1496 cpu_load_efer(env, 0);
eaa728ee 1497#endif
1498 cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
1499 DF_MASK));
1500 env->eip = 0x00008000;
1501 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1502 0xffffffff, 0);
1503 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1504 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1505 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1506 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1507 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1508
1509 cpu_x86_update_cr0(env,
1510 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
1511 CR0_PG_MASK));
1512 cpu_x86_update_cr4(env, 0);
1513 env->dr[7] = 0x00000400;
1514 CC_OP = CC_OP_EFLAGS;
e694d4e2 1515 env = saved_env;
1516}
1517
1518void helper_rsm(void)
1519{
1520 target_ulong sm_state;
1521 int i, offset;
1522 uint32_t val;
1523
1524 sm_state = env->smbase + 0x8000;
1525#ifdef TARGET_X86_64
5efc27bb 1526 cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
eaa728ee 1527
20054ef0 1528 for (i = 0; i < 6; i++) {
1529 offset = 0x7e00 + i * 16;
1530 cpu_x86_load_seg_cache(env, i,
1531 lduw_phys(sm_state + offset),
1532 ldq_phys(sm_state + offset + 8),
1533 ldl_phys(sm_state + offset + 4),
1534 (lduw_phys(sm_state + offset + 2) &
1535 0xf0ff) << 8);
1536 }
1537
1538 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1539 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1540
1541 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1542 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1543 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1544 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1545
1546 env->idt.base = ldq_phys(sm_state + 0x7e88);
1547 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1548
1549 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1550 env->tr.base = ldq_phys(sm_state + 0x7e98);
1551 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1552 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1553
1554 EAX = ldq_phys(sm_state + 0x7ff8);
1555 ECX = ldq_phys(sm_state + 0x7ff0);
1556 EDX = ldq_phys(sm_state + 0x7fe8);
1557 EBX = ldq_phys(sm_state + 0x7fe0);
1558 ESP = ldq_phys(sm_state + 0x7fd8);
1559 EBP = ldq_phys(sm_state + 0x7fd0);
1560 ESI = ldq_phys(sm_state + 0x7fc8);
1561 EDI = ldq_phys(sm_state + 0x7fc0);
20054ef0 1562 for (i = 8; i < 16; i++) {
eaa728ee 1563 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
20054ef0 1564 }
eaa728ee 1565 env->eip = ldq_phys(sm_state + 0x7f78);
1566 cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
1567 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1568 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1569 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1570
1571 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1572 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1573 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1574
1575 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1576 if (val & 0x20000) {
1577 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1578 }
1579#else
1580 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1581 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1582 cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
1583 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1584 env->eip = ldl_phys(sm_state + 0x7ff0);
1585 EDI = ldl_phys(sm_state + 0x7fec);
1586 ESI = ldl_phys(sm_state + 0x7fe8);
1587 EBP = ldl_phys(sm_state + 0x7fe4);
1588 ESP = ldl_phys(sm_state + 0x7fe0);
1589 EBX = ldl_phys(sm_state + 0x7fdc);
1590 EDX = ldl_phys(sm_state + 0x7fd8);
1591 ECX = ldl_phys(sm_state + 0x7fd4);
1592 EAX = ldl_phys(sm_state + 0x7fd0);
1593 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1594 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1595
1596 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1597 env->tr.base = ldl_phys(sm_state + 0x7f64);
1598 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1599 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1600
1601 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1602 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1603 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1604 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1605
1606 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1607 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1608
1609 env->idt.base = ldl_phys(sm_state + 0x7f58);
1610 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1611
1612 for (i = 0; i < 6; i++) {
1613 if (i < 3) {
eaa728ee 1614 offset = 0x7f84 + i * 12;
20054ef0 1615 } else {
eaa728ee 1616 offset = 0x7f2c + (i - 3) * 12;
20054ef0 1617 }
1618 cpu_x86_load_seg_cache(env, i,
1619 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1620 ldl_phys(sm_state + offset + 8),
1621 ldl_phys(sm_state + offset + 4),
1622 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1623 }
1624 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1625
1626 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1627 if (val & 0x20000) {
1628 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1629 }
1630#endif
1631 CC_OP = CC_OP_EFLAGS;
1632 env->hflags &= ~HF_SMM_MASK;
1633 cpu_smm_update(env);
1634
1635 qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
1636 log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
1637}
1638
1639#endif /* !CONFIG_USER_ONLY */
1640
1641void helper_into(int next_eip_addend)
1642{
1643 int eflags;
20054ef0 1644
a7812ae4 1645 eflags = helper_cc_compute_all(CC_OP);
eaa728ee 1646 if (eflags & CC_O) {
77b2bc2c 1647 raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
1648 }
1649}
1650
1651void helper_cmpxchg8b(target_ulong a0)
1652{
1653 uint64_t d;
1654 int eflags;
1655
a7812ae4 1656 eflags = helper_cc_compute_all(CC_OP);
1657 d = ldq(a0);
1658 if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
1659 stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
1660 eflags |= CC_Z;
1661 } else {
278ed7c3 1662 /* always do the store */
20054ef0 1663 stq(a0, d);
1664 EDX = (uint32_t)(d >> 32);
1665 EAX = (uint32_t)d;
1666 eflags &= ~CC_Z;
1667 }
1668 CC_SRC = eflags;
1669}
1670
1671#ifdef TARGET_X86_64
1672void helper_cmpxchg16b(target_ulong a0)
1673{
1674 uint64_t d0, d1;
1675 int eflags;
1676
20054ef0 1677 if ((a0 & 0xf) != 0) {
77b2bc2c 1678 raise_exception(env, EXCP0D_GPF);
20054ef0 1679 }
a7812ae4 1680 eflags = helper_cc_compute_all(CC_OP);
1681 d0 = ldq(a0);
1682 d1 = ldq(a0 + 8);
1683 if (d0 == EAX && d1 == EDX) {
1684 stq(a0, EBX);
1685 stq(a0 + 8, ECX);
1686 eflags |= CC_Z;
1687 } else {
278ed7c3 1688 /* always do the store */
1689 stq(a0, d0);
1690 stq(a0 + 8, d1);
1691 EDX = d1;
1692 EAX = d0;
1693 eflags &= ~CC_Z;
1694 }
1695 CC_SRC = eflags;
1696}
1697#endif
1698
1699void helper_single_step(void)
1700{
1701#ifndef CONFIG_USER_ONLY
1702 check_hw_breakpoints(env, 1);
1703 env->dr[6] |= DR6_BS;
1704#endif
77b2bc2c 1705 raise_exception(env, EXCP01_DB);
1706}
1707
1708void helper_cpuid(void)
1709{
6fd805e1 1710 uint32_t eax, ebx, ecx, edx;
eaa728ee 1711
6bada5e8 1712 cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);
e737b32a 1713
e00b6f80 1714 cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1715 EAX = eax;
1716 EBX = ebx;
1717 ECX = ecx;
1718 EDX = edx;
1719}
1720
1721void helper_enter_level(int level, int data32, target_ulong t1)
1722{
1723 target_ulong ssp;
1724 uint32_t esp_mask, esp, ebp;
1725
1726 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1727 ssp = env->segs[R_SS].base;
1728 ebp = EBP;
1729 esp = ESP;
1730 if (data32) {
1731 /* 32 bit */
1732 esp -= 4;
1733 while (--level) {
1734 esp -= 4;
1735 ebp -= 4;
1736 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1737 }
1738 esp -= 4;
1739 stl(ssp + (esp & esp_mask), t1);
1740 } else {
1741 /* 16 bit */
1742 esp -= 2;
1743 while (--level) {
1744 esp -= 2;
1745 ebp -= 2;
1746 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1747 }
1748 esp -= 2;
1749 stw(ssp + (esp & esp_mask), t1);
1750 }
1751}
1752
1753#ifdef TARGET_X86_64
1754void helper_enter64_level(int level, int data64, target_ulong t1)
1755{
1756 target_ulong esp, ebp;
20054ef0 1757
eaa728ee
FB
1758 ebp = EBP;
1759 esp = ESP;
1760
1761 if (data64) {
1762 /* 64 bit */
1763 esp -= 8;
1764 while (--level) {
1765 esp -= 8;
1766 ebp -= 8;
1767 stq(esp, ldq(ebp));
1768 }
1769 esp -= 8;
1770 stq(esp, t1);
1771 } else {
1772 /* 16 bit */
1773 esp -= 2;
1774 while (--level) {
1775 esp -= 2;
1776 ebp -= 2;
1777 stw(esp, lduw(ebp));
1778 }
1779 esp -= 2;
1780 stw(esp, t1);
1781 }
1782}
1783#endif
1784
1785void helper_lldt(int selector)
1786{
1787 SegmentCache *dt;
1788 uint32_t e1, e2;
1789 int index, entry_limit;
1790 target_ulong ptr;
1791
1792 selector &= 0xffff;
1793 if ((selector & 0xfffc) == 0) {
1794 /* XXX: NULL selector case: invalid LDT */
1795 env->ldt.base = 0;
1796 env->ldt.limit = 0;
1797 } else {
20054ef0 1798 if (selector & 0x4) {
77b2bc2c 1799 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1800 }
eaa728ee
FB
1801 dt = &env->gdt;
1802 index = selector & ~7;
1803#ifdef TARGET_X86_64
20054ef0 1804 if (env->hflags & HF_LMA_MASK) {
eaa728ee 1805 entry_limit = 15;
20054ef0 1806 } else
eaa728ee 1807#endif
20054ef0 1808 {
eaa728ee 1809 entry_limit = 7;
20054ef0
BS
1810 }
1811 if ((index + entry_limit) > dt->limit) {
77b2bc2c 1812 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1813 }
eaa728ee
FB
1814 ptr = dt->base + index;
1815 e1 = ldl_kernel(ptr);
1816 e2 = ldl_kernel(ptr + 4);
20054ef0 1817 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
77b2bc2c 1818 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
1819 }
1820 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1821 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 1822 }
eaa728ee
FB
1823#ifdef TARGET_X86_64
1824 if (env->hflags & HF_LMA_MASK) {
1825 uint32_t e3;
20054ef0 1826
eaa728ee
FB
1827 e3 = ldl_kernel(ptr + 8);
1828 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1829 env->ldt.base |= (target_ulong)e3 << 32;
1830 } else
1831#endif
1832 {
1833 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1834 }
1835 }
1836 env->ldt.selector = selector;
1837}
1838
1839void helper_ltr(int selector)
1840{
1841 SegmentCache *dt;
1842 uint32_t e1, e2;
1843 int index, type, entry_limit;
1844 target_ulong ptr;
1845
1846 selector &= 0xffff;
1847 if ((selector & 0xfffc) == 0) {
1848 /* NULL selector case: invalid TR */
1849 env->tr.base = 0;
1850 env->tr.limit = 0;
1851 env->tr.flags = 0;
1852 } else {
20054ef0 1853 if (selector & 0x4) {
77b2bc2c 1854 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1855 }
eaa728ee
FB
1856 dt = &env->gdt;
1857 index = selector & ~7;
1858#ifdef TARGET_X86_64
20054ef0 1859 if (env->hflags & HF_LMA_MASK) {
eaa728ee 1860 entry_limit = 15;
20054ef0 1861 } else
eaa728ee 1862#endif
20054ef0 1863 {
eaa728ee 1864 entry_limit = 7;
20054ef0
BS
1865 }
1866 if ((index + entry_limit) > dt->limit) {
77b2bc2c 1867 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1868 }
eaa728ee
FB
1869 ptr = dt->base + index;
1870 e1 = ldl_kernel(ptr);
1871 e2 = ldl_kernel(ptr + 4);
1872 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1873 if ((e2 & DESC_S_MASK) ||
20054ef0 1874 (type != 1 && type != 9)) {
77b2bc2c 1875 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
1876 }
1877 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 1878 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 1879 }
eaa728ee
FB
1880#ifdef TARGET_X86_64
1881 if (env->hflags & HF_LMA_MASK) {
1882 uint32_t e3, e4;
20054ef0 1883
eaa728ee
FB
1884 e3 = ldl_kernel(ptr + 8);
1885 e4 = ldl_kernel(ptr + 12);
20054ef0 1886 if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
77b2bc2c 1887 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1888 }
eaa728ee
FB
1889 load_seg_cache_raw_dt(&env->tr, e1, e2);
1890 env->tr.base |= (target_ulong)e3 << 32;
1891 } else
1892#endif
1893 {
1894 load_seg_cache_raw_dt(&env->tr, e1, e2);
1895 }
1896 e2 |= DESC_TSS_BUSY_MASK;
1897 stl_kernel(ptr + 4, e2);
1898 }
1899 env->tr.selector = selector;
1900}
1901
1902/* only works if protected mode and not VM86. seg_reg must be != R_CS */
1903void helper_load_seg(int seg_reg, int selector)
1904{
1905 uint32_t e1, e2;
1906 int cpl, dpl, rpl;
1907 SegmentCache *dt;
1908 int index;
1909 target_ulong ptr;
1910
1911 selector &= 0xffff;
1912 cpl = env->hflags & HF_CPL_MASK;
1913 if ((selector & 0xfffc) == 0) {
1914 /* null selector case */
1915 if (seg_reg == R_SS
1916#ifdef TARGET_X86_64
1917 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1918#endif
20054ef0 1919 ) {
77b2bc2c 1920 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 1921 }
eaa728ee
FB
1922 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1923 } else {
1924
20054ef0 1925 if (selector & 0x4) {
eaa728ee 1926 dt = &env->ldt;
20054ef0 1927 } else {
eaa728ee 1928 dt = &env->gdt;
20054ef0 1929 }
eaa728ee 1930 index = selector & ~7;
20054ef0 1931 if ((index + 7) > dt->limit) {
77b2bc2c 1932 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1933 }
eaa728ee
FB
1934 ptr = dt->base + index;
1935 e1 = ldl_kernel(ptr);
1936 e2 = ldl_kernel(ptr + 4);
1937
20054ef0 1938 if (!(e2 & DESC_S_MASK)) {
77b2bc2c 1939 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1940 }
eaa728ee
FB
1941 rpl = selector & 3;
1942 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1943 if (seg_reg == R_SS) {
1944 /* must be writable segment */
20054ef0 1945 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
77b2bc2c 1946 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
1947 }
1948 if (rpl != cpl || dpl != cpl) {
77b2bc2c 1949 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1950 }
eaa728ee
FB
1951 } else {
1952 /* must be readable segment */
20054ef0 1953 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
77b2bc2c 1954 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1955 }
eaa728ee
FB
1956
1957 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
1958 /* if not conforming code, test rights */
20054ef0 1959 if (dpl < cpl || dpl < rpl) {
77b2bc2c 1960 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 1961 }
eaa728ee
FB
1962 }
1963 }
1964
1965 if (!(e2 & DESC_P_MASK)) {
20054ef0 1966 if (seg_reg == R_SS) {
77b2bc2c 1967 raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
20054ef0 1968 } else {
77b2bc2c 1969 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 1970 }
eaa728ee
FB
1971 }
1972
1973 /* set the access bit if not already set */
1974 if (!(e2 & DESC_A_MASK)) {
1975 e2 |= DESC_A_MASK;
1976 stl_kernel(ptr + 4, e2);
1977 }
1978
1979 cpu_x86_load_seg_cache(env, seg_reg, selector,
1980 get_seg_base(e1, e2),
1981 get_seg_limit(e1, e2),
1982 e2);
1983#if 0
93fcfe39 1984 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
eaa728ee
FB
1985 selector, (unsigned long)sc->base, sc->limit, sc->flags);
1986#endif
1987 }
1988}
1989
1990/* protected mode jump */
1991void helper_ljmp_protected(int new_cs, target_ulong new_eip,
1992 int next_eip_addend)
1993{
1994 int gate_cs, type;
1995 uint32_t e1, e2, cpl, dpl, rpl, limit;
1996 target_ulong next_eip;
1997
20054ef0 1998 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 1999 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0
BS
2000 }
2001 if (load_segment(&e1, &e2, new_cs) != 0) {
77b2bc2c 2002 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2003 }
eaa728ee
FB
2004 cpl = env->hflags & HF_CPL_MASK;
2005 if (e2 & DESC_S_MASK) {
20054ef0 2006 if (!(e2 & DESC_CS_MASK)) {
77b2bc2c 2007 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2008 }
eaa728ee
FB
2009 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2010 if (e2 & DESC_C_MASK) {
2011 /* conforming code segment */
20054ef0 2012 if (dpl > cpl) {
77b2bc2c 2013 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2014 }
eaa728ee
FB
2015 } else {
2016 /* non conforming code segment */
2017 rpl = new_cs & 3;
20054ef0 2018 if (rpl > cpl) {
77b2bc2c 2019 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0
BS
2020 }
2021 if (dpl != cpl) {
77b2bc2c 2022 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2023 }
eaa728ee 2024 }
20054ef0 2025 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2026 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 2027 }
eaa728ee
FB
2028 limit = get_seg_limit(e1, e2);
2029 if (new_eip > limit &&
20054ef0 2030 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
77b2bc2c 2031 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2032 }
eaa728ee
FB
2033 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2034 get_seg_base(e1, e2), limit, e2);
2035 EIP = new_eip;
2036 } else {
2037 /* jump to call or task gate */
2038 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2039 rpl = new_cs & 3;
2040 cpl = env->hflags & HF_CPL_MASK;
2041 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 2042 switch (type) {
eaa728ee
FB
2043 case 1: /* 286 TSS */
2044 case 9: /* 386 TSS */
2045 case 5: /* task gate */
20054ef0 2046 if (dpl < cpl || dpl < rpl) {
77b2bc2c 2047 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2048 }
eaa728ee
FB
2049 next_eip = env->eip + next_eip_addend;
2050 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2051 CC_OP = CC_OP_EFLAGS;
2052 break;
2053 case 4: /* 286 call gate */
2054 case 12: /* 386 call gate */
20054ef0 2055 if ((dpl < cpl) || (dpl < rpl)) {
77b2bc2c 2056 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0
BS
2057 }
2058 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2059 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 2060 }
eaa728ee
FB
2061 gate_cs = e1 >> 16;
2062 new_eip = (e1 & 0xffff);
20054ef0 2063 if (type == 12) {
eaa728ee 2064 new_eip |= (e2 & 0xffff0000);
20054ef0
BS
2065 }
2066 if (load_segment(&e1, &e2, gate_cs) != 0) {
77b2bc2c 2067 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 2068 }
eaa728ee
FB
2069 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2070 /* must be code segment */
2071 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
20054ef0 2072 (DESC_S_MASK | DESC_CS_MASK))) {
77b2bc2c 2073 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 2074 }
eaa728ee 2075 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
20054ef0 2076 (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
77b2bc2c 2077 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0
BS
2078 }
2079 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2080 raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
20054ef0 2081 }
eaa728ee 2082 limit = get_seg_limit(e1, e2);
20054ef0 2083 if (new_eip > limit) {
77b2bc2c 2084 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 2085 }
eaa728ee
FB
2086 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2087 get_seg_base(e1, e2), limit, e2);
2088 EIP = new_eip;
2089 break;
2090 default:
77b2bc2c 2091 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
eaa728ee
FB
2092 break;
2093 }
2094 }
2095}
2096
2097/* real mode call */
2098void helper_lcall_real(int new_cs, target_ulong new_eip1,
2099 int shift, int next_eip)
2100{
2101 int new_eip;
2102 uint32_t esp, esp_mask;
2103 target_ulong ssp;
2104
2105 new_eip = new_eip1;
2106 esp = ESP;
2107 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2108 ssp = env->segs[R_SS].base;
2109 if (shift) {
2110 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2111 PUSHL(ssp, esp, esp_mask, next_eip);
2112 } else {
2113 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2114 PUSHW(ssp, esp, esp_mask, next_eip);
2115 }
2116
2117 SET_ESP(esp, esp_mask);
2118 env->eip = new_eip;
2119 env->segs[R_CS].selector = new_cs;
2120 env->segs[R_CS].base = (new_cs << 4);
2121}
2122
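/*
 * Overview of the protected-mode far-call paths handled below:
 *  - target is a code segment: check the conforming/non-conforming
 *    privilege rules, push the return CS:EIP on the current stack
 *    (64-bit pushes when shift == 2) and branch;
 *  - target is a TSS or task gate: hand over to switch_tss();
 *  - target is a call gate: CS:EIP come from the gate; a call to a more
 *    privileged non-conforming segment switches to the inner stack taken
 *    from the TSS, copies param_count parameters from the old stack, and
 *    pushes the old SS:ESP and CS:EIP before the new CPL is set.
 */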
2123/* protected mode call */
20054ef0 2124void helper_lcall_protected(int new_cs, target_ulong new_eip,
eaa728ee
FB
2125 int shift, int next_eip_addend)
2126{
2127 int new_stack, i;
2128 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
1c918eba 2129 uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
eaa728ee
FB
2130 uint32_t val, limit, old_sp_mask;
2131 target_ulong ssp, old_ssp, next_eip;
2132
2133 next_eip = env->eip + next_eip_addend;
d12d51d5
AL
2134 LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2135 LOG_PCALL_STATE(env);
20054ef0 2136 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 2137 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0
BS
2138 }
2139 if (load_segment(&e1, &e2, new_cs) != 0) {
77b2bc2c 2140 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2141 }
eaa728ee 2142 cpl = env->hflags & HF_CPL_MASK;
d12d51d5 2143 LOG_PCALL("desc=%08x:%08x\n", e1, e2);
eaa728ee 2144 if (e2 & DESC_S_MASK) {
20054ef0 2145 if (!(e2 & DESC_CS_MASK)) {
77b2bc2c 2146 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2147 }
eaa728ee
FB
2148 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2149 if (e2 & DESC_C_MASK) {
2150 /* conforming code segment */
20054ef0 2151 if (dpl > cpl) {
77b2bc2c 2152 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2153 }
eaa728ee
FB
2154 } else {
2155 /* non conforming code segment */
2156 rpl = new_cs & 3;
20054ef0 2157 if (rpl > cpl) {
77b2bc2c 2158 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0
BS
2159 }
2160 if (dpl != cpl) {
77b2bc2c 2161 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2162 }
eaa728ee 2163 }
20054ef0 2164 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2165 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 2166 }
eaa728ee
FB
2167
2168#ifdef TARGET_X86_64
2169 /* XXX: check 16/32 bit cases in long mode */
2170 if (shift == 2) {
2171 target_ulong rsp;
20054ef0 2172
eaa728ee
FB
2173 /* 64 bit case */
2174 rsp = ESP;
2175 PUSHQ(rsp, env->segs[R_CS].selector);
2176 PUSHQ(rsp, next_eip);
2177 /* from this point, not restartable */
2178 ESP = rsp;
2179 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2180 get_seg_base(e1, e2),
2181 get_seg_limit(e1, e2), e2);
2182 EIP = new_eip;
2183 } else
2184#endif
2185 {
2186 sp = ESP;
2187 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2188 ssp = env->segs[R_SS].base;
2189 if (shift) {
2190 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2191 PUSHL(ssp, sp, sp_mask, next_eip);
2192 } else {
2193 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2194 PUSHW(ssp, sp, sp_mask, next_eip);
2195 }
2196
2197 limit = get_seg_limit(e1, e2);
20054ef0 2198 if (new_eip > limit) {
77b2bc2c 2199 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2200 }
eaa728ee
FB
2201 /* from this point, not restartable */
2202 SET_ESP(sp, sp_mask);
2203 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2204 get_seg_base(e1, e2), limit, e2);
2205 EIP = new_eip;
2206 }
2207 } else {
2208 /* check gate type */
2209 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2210 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2211 rpl = new_cs & 3;
20054ef0 2212 switch (type) {
eaa728ee
FB
2213 case 1: /* available 286 TSS */
2214 case 9: /* available 386 TSS */
2215 case 5: /* task gate */
20054ef0 2216 if (dpl < cpl || dpl < rpl) {
77b2bc2c 2217 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2218 }
eaa728ee
FB
2219 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2220 CC_OP = CC_OP_EFLAGS;
2221 return;
2222 case 4: /* 286 call gate */
2223 case 12: /* 386 call gate */
2224 break;
2225 default:
77b2bc2c 2226 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
eaa728ee
FB
2227 break;
2228 }
2229 shift = type >> 3;
2230
20054ef0 2231 if (dpl < cpl || dpl < rpl) {
77b2bc2c 2232 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2233 }
eaa728ee 2234 /* check valid bit */
20054ef0 2235 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2236 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 2237 }
eaa728ee
FB
2238 selector = e1 >> 16;
2239 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2240 param_count = e2 & 0x1f;
20054ef0 2241 if ((selector & 0xfffc) == 0) {
77b2bc2c 2242 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 2243 }
eaa728ee 2244
20054ef0 2245 if (load_segment(&e1, &e2, selector) != 0) {
77b2bc2c 2246 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
2247 }
2248 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
77b2bc2c 2249 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0 2250 }
eaa728ee 2251 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 2252 if (dpl > cpl) {
77b2bc2c 2253 raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
20054ef0
BS
2254 }
2255 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2256 raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
20054ef0 2257 }
eaa728ee
FB
2258
2259 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2260 /* to inner privilege */
2261 get_ss_esp_from_tss(&ss, &sp, dpl);
20054ef0
BS
2262 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
2263 "\n",
2264 ss, sp, param_count, ESP);
2265 if ((ss & 0xfffc) == 0) {
77b2bc2c 2266 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
2267 }
2268 if ((ss & 3) != dpl) {
77b2bc2c 2269 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
2270 }
2271 if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
77b2bc2c 2272 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 2273 }
eaa728ee 2274 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 2275 if (ss_dpl != dpl) {
77b2bc2c 2276 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 2277 }
eaa728ee
FB
2278 if (!(ss_e2 & DESC_S_MASK) ||
2279 (ss_e2 & DESC_CS_MASK) ||
20054ef0 2280 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 2281 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0
BS
2282 }
2283 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 2284 raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
20054ef0 2285 }
eaa728ee 2286
20054ef0 2287 /* push_size = ((param_count * 2) + 8) << shift; */
eaa728ee
FB
2288
2289 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2290 old_ssp = env->segs[R_SS].base;
2291
2292 sp_mask = get_sp_mask(ss_e2);
2293 ssp = get_seg_base(ss_e1, ss_e2);
2294 if (shift) {
2295 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2296 PUSHL(ssp, sp, sp_mask, ESP);
20054ef0 2297 for (i = param_count - 1; i >= 0; i--) {
eaa728ee
FB
2298 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2299 PUSHL(ssp, sp, sp_mask, val);
2300 }
2301 } else {
2302 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2303 PUSHW(ssp, sp, sp_mask, ESP);
20054ef0 2304 for (i = param_count - 1; i >= 0; i--) {
eaa728ee
FB
2305 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2306 PUSHW(ssp, sp, sp_mask, val);
2307 }
2308 }
2309 new_stack = 1;
2310 } else {
2311 /* to same privilege */
2312 sp = ESP;
2313 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2314 ssp = env->segs[R_SS].base;
20054ef0 2315 /* push_size = (4 << shift); */
eaa728ee
FB
2316 new_stack = 0;
2317 }
2318
2319 if (shift) {
2320 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2321 PUSHL(ssp, sp, sp_mask, next_eip);
2322 } else {
2323 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2324 PUSHW(ssp, sp, sp_mask, next_eip);
2325 }
2326
2327 /* from this point, not restartable */
2328
2329 if (new_stack) {
2330 ss = (ss & ~3) | dpl;
2331 cpu_x86_load_seg_cache(env, R_SS, ss,
2332 ssp,
2333 get_seg_limit(ss_e1, ss_e2),
2334 ss_e2);
2335 }
2336
2337 selector = (selector & ~3) | dpl;
2338 cpu_x86_load_seg_cache(env, R_CS, selector,
2339 get_seg_base(e1, e2),
2340 get_seg_limit(e1, e2),
2341 e2);
2342 cpu_x86_set_cpl(env, dpl);
2343 SET_ESP(sp, sp_mask);
2344 EIP = offset;
2345 }
eaa728ee
FB
2346}
2347
2348/* real and vm86 mode iret */
2349void helper_iret_real(int shift)
2350{
2351 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2352 target_ulong ssp;
2353 int eflags_mask;
2354
20054ef0 2355 sp_mask = 0xffff; /* XXX: use SS segment size? */
eaa728ee
FB
2356 sp = ESP;
2357 ssp = env->segs[R_SS].base;
2358 if (shift == 1) {
2359 /* 32 bits */
2360 POPL(ssp, sp, sp_mask, new_eip);
2361 POPL(ssp, sp, sp_mask, new_cs);
2362 new_cs &= 0xffff;
2363 POPL(ssp, sp, sp_mask, new_eflags);
2364 } else {
2365 /* 16 bits */
2366 POPW(ssp, sp, sp_mask, new_eip);
2367 POPW(ssp, sp, sp_mask, new_cs);
2368 POPW(ssp, sp, sp_mask, new_eflags);
2369 }
2370 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
bdadc0b5 2371 env->segs[R_CS].selector = new_cs;
2372 env->segs[R_CS].base = (new_cs << 4);
eaa728ee 2373 env->eip = new_eip;
20054ef0
BS
2374 if (env->eflags & VM_MASK) {
2375 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
2376 NT_MASK;
2377 } else {
2378 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
2379 RF_MASK | NT_MASK;
2380 }
2381 if (shift == 0) {
eaa728ee 2382 eflags_mask &= 0xffff;
20054ef0 2383 }
997ff0d9 2384 cpu_load_eflags(env, new_eflags, eflags_mask);
db620f46 2385 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2386}
2387
2388static inline void validate_seg(int seg_reg, int cpl)
2389{
2390 int dpl;
2391 uint32_t e2;
2392
2393 /* XXX: on x86_64, we do not want to nullify FS and GS because
2394 they may still contain a valid base. I would be interested to
2395 know how a real x86_64 CPU behaves */
2396 if ((seg_reg == R_FS || seg_reg == R_GS) &&
20054ef0 2397 (env->segs[seg_reg].selector & 0xfffc) == 0) {
eaa728ee 2398 return;
20054ef0 2399 }
eaa728ee
FB
2400
2401 e2 = env->segs[seg_reg].flags;
2402 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2403 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2404 /* data or non conforming code segment */
2405 if (dpl < cpl) {
2406 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2407 }
2408 }
2409}
2410
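/*
 * Common RET/IRET back-end: pop the new EIP and CS (plus EFLAGS for IRET)
 * using the operand size selected by 'shift' (0/1/2 = 16/32/64 bit).  A
 * return to the same privilege level only reloads CS; a return to an
 * outer level (and any IRET in 64-bit mode) also pops SS:ESP, revalidates
 * the data segment registers and sets the CPL from the new CS RPL.  An
 * IRET that pops EFLAGS.VM=1 goes through return_to_vm86 instead and
 * restores the full vm86 segment state.
 */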
2411/* protected mode iret */
2412static inline void helper_ret_protected(int shift, int is_iret, int addend)
2413{
2414 uint32_t new_cs, new_eflags, new_ss;
2415 uint32_t new_es, new_ds, new_fs, new_gs;
2416 uint32_t e1, e2, ss_e1, ss_e2;
2417 int cpl, dpl, rpl, eflags_mask, iopl;
2418 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2419
2420#ifdef TARGET_X86_64
20054ef0 2421 if (shift == 2) {
eaa728ee 2422 sp_mask = -1;
20054ef0 2423 } else
eaa728ee 2424#endif
20054ef0 2425 {
eaa728ee 2426 sp_mask = get_sp_mask(env->segs[R_SS].flags);
20054ef0 2427 }
eaa728ee
FB
2428 sp = ESP;
2429 ssp = env->segs[R_SS].base;
2430 new_eflags = 0; /* avoid warning */
2431#ifdef TARGET_X86_64
2432 if (shift == 2) {
2433 POPQ(sp, new_eip);
2434 POPQ(sp, new_cs);
2435 new_cs &= 0xffff;
2436 if (is_iret) {
2437 POPQ(sp, new_eflags);
2438 }
2439 } else
2440#endif
20054ef0
BS
2441 {
2442 if (shift == 1) {
2443 /* 32 bits */
2444 POPL(ssp, sp, sp_mask, new_eip);
2445 POPL(ssp, sp, sp_mask, new_cs);
2446 new_cs &= 0xffff;
2447 if (is_iret) {
2448 POPL(ssp, sp, sp_mask, new_eflags);
2449 if (new_eflags & VM_MASK) {
2450 goto return_to_vm86;
2451 }
2452 }
2453 } else {
2454 /* 16 bits */
2455 POPW(ssp, sp, sp_mask, new_eip);
2456 POPW(ssp, sp, sp_mask, new_cs);
2457 if (is_iret) {
2458 POPW(ssp, sp, sp_mask, new_eflags);
2459 }
eaa728ee 2460 }
eaa728ee 2461 }
d12d51d5
AL
2462 LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2463 new_cs, new_eip, shift, addend);
2464 LOG_PCALL_STATE(env);
20054ef0 2465 if ((new_cs & 0xfffc) == 0) {
77b2bc2c 2466 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0
BS
2467 }
2468 if (load_segment(&e1, &e2, new_cs) != 0) {
77b2bc2c 2469 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2470 }
eaa728ee 2471 if (!(e2 & DESC_S_MASK) ||
20054ef0 2472 !(e2 & DESC_CS_MASK)) {
77b2bc2c 2473 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2474 }
eaa728ee
FB
2475 cpl = env->hflags & HF_CPL_MASK;
2476 rpl = new_cs & 3;
20054ef0 2477 if (rpl < cpl) {
77b2bc2c 2478 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2479 }
eaa728ee
FB
2480 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2481 if (e2 & DESC_C_MASK) {
20054ef0 2482 if (dpl > rpl) {
77b2bc2c 2483 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2484 }
eaa728ee 2485 } else {
20054ef0 2486 if (dpl != rpl) {
77b2bc2c 2487 raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
20054ef0 2488 }
eaa728ee 2489 }
20054ef0 2490 if (!(e2 & DESC_P_MASK)) {
77b2bc2c 2491 raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
20054ef0 2492 }
eaa728ee
FB
2493
2494 sp += addend;
2495 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2496 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
1235fc06 2497 /* return to same privilege level */
eaa728ee
FB
2498 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2499 get_seg_base(e1, e2),
2500 get_seg_limit(e1, e2),
2501 e2);
2502 } else {
2503 /* return to different privilege level */
2504#ifdef TARGET_X86_64
2505 if (shift == 2) {
2506 POPQ(sp, new_esp);
2507 POPQ(sp, new_ss);
2508 new_ss &= 0xffff;
2509 } else
2510#endif
20054ef0
BS
2511 {
2512 if (shift == 1) {
2513 /* 32 bits */
2514 POPL(ssp, sp, sp_mask, new_esp);
2515 POPL(ssp, sp, sp_mask, new_ss);
2516 new_ss &= 0xffff;
2517 } else {
2518 /* 16 bits */
2519 POPW(ssp, sp, sp_mask, new_esp);
2520 POPW(ssp, sp, sp_mask, new_ss);
2521 }
eaa728ee 2522 }
d12d51d5 2523 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
20054ef0 2524 new_ss, new_esp);
eaa728ee
FB
2525 if ((new_ss & 0xfffc) == 0) {
2526#ifdef TARGET_X86_64
20054ef0
BS
2527 /* NULL ss is allowed in long mode if cpl != 3 */
2528 /* XXX: test CS64? */
eaa728ee
FB
2529 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2530 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2531 0, 0xffffffff,
2532 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2533 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2534 DESC_W_MASK | DESC_A_MASK);
20054ef0 2535 ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
eaa728ee
FB
2536 } else
2537#endif
2538 {
77b2bc2c 2539 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee
FB
2540 }
2541 } else {
20054ef0 2542 if ((new_ss & 3) != rpl) {
77b2bc2c 2543 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0
BS
2544 }
2545 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
77b2bc2c 2546 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0 2547 }
eaa728ee
FB
2548 if (!(ss_e2 & DESC_S_MASK) ||
2549 (ss_e2 & DESC_CS_MASK) ||
20054ef0 2550 !(ss_e2 & DESC_W_MASK)) {
77b2bc2c 2551 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0 2552 }
eaa728ee 2553 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
20054ef0 2554 if (dpl != rpl) {
77b2bc2c 2555 raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
20054ef0
BS
2556 }
2557 if (!(ss_e2 & DESC_P_MASK)) {
77b2bc2c 2558 raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
20054ef0 2559 }
eaa728ee
FB
2560 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2561 get_seg_base(ss_e1, ss_e2),
2562 get_seg_limit(ss_e1, ss_e2),
2563 ss_e2);
2564 }
2565
2566 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2567 get_seg_base(e1, e2),
2568 get_seg_limit(e1, e2),
2569 e2);
2570 cpu_x86_set_cpl(env, rpl);
2571 sp = new_esp;
2572#ifdef TARGET_X86_64
20054ef0 2573 if (env->hflags & HF_CS64_MASK) {
eaa728ee 2574 sp_mask = -1;
20054ef0 2575 } else
eaa728ee 2576#endif
20054ef0 2577 {
eaa728ee 2578 sp_mask = get_sp_mask(ss_e2);
20054ef0 2579 }
eaa728ee
FB
2580
2581 /* validate data segments */
2582 validate_seg(R_ES, rpl);
2583 validate_seg(R_DS, rpl);
2584 validate_seg(R_FS, rpl);
2585 validate_seg(R_GS, rpl);
2586
2587 sp += addend;
2588 }
2589 SET_ESP(sp, sp_mask);
2590 env->eip = new_eip;
2591 if (is_iret) {
2592 /* NOTE: 'cpl' is the _old_ CPL */
2593 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
20054ef0 2594 if (cpl == 0) {
eaa728ee 2595 eflags_mask |= IOPL_MASK;
20054ef0 2596 }
eaa728ee 2597 iopl = (env->eflags >> IOPL_SHIFT) & 3;
20054ef0 2598 if (cpl <= iopl) {
eaa728ee 2599 eflags_mask |= IF_MASK;
20054ef0
BS
2600 }
2601 if (shift == 0) {
eaa728ee 2602 eflags_mask &= 0xffff;
20054ef0 2603 }
997ff0d9 2604 cpu_load_eflags(env, new_eflags, eflags_mask);
eaa728ee
FB
2605 }
2606 return;
2607
2608 return_to_vm86:
2609 POPL(ssp, sp, sp_mask, new_esp);
2610 POPL(ssp, sp, sp_mask, new_ss);
2611 POPL(ssp, sp, sp_mask, new_es);
2612 POPL(ssp, sp, sp_mask, new_ds);
2613 POPL(ssp, sp, sp_mask, new_fs);
2614 POPL(ssp, sp, sp_mask, new_gs);
2615
2616 /* modify processor state */
997ff0d9
BS
2617 cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
2618 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
2619 VIP_MASK);
eaa728ee
FB
2620 load_seg_vm(R_CS, new_cs & 0xffff);
2621 cpu_x86_set_cpl(env, 3);
2622 load_seg_vm(R_SS, new_ss & 0xffff);
2623 load_seg_vm(R_ES, new_es & 0xffff);
2624 load_seg_vm(R_DS, new_ds & 0xffff);
2625 load_seg_vm(R_FS, new_fs & 0xffff);
2626 load_seg_vm(R_GS, new_gs & 0xffff);
2627
2628 env->eip = new_eip & 0xffff;
2629 ESP = new_esp;
2630}
2631
2632void helper_iret_protected(int shift, int next_eip)
2633{
2634 int tss_selector, type;
2635 uint32_t e1, e2;
2636
2637 /* specific case for TSS */
2638 if (env->eflags & NT_MASK) {
2639#ifdef TARGET_X86_64
20054ef0 2640 if (env->hflags & HF_LMA_MASK) {
77b2bc2c 2641 raise_exception_err(env, EXCP0D_GPF, 0);
20054ef0 2642 }
eaa728ee
FB
2643#endif
2644 tss_selector = lduw_kernel(env->tr.base + 0);
20054ef0 2645 if (tss_selector & 4) {
77b2bc2c 2646 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0
BS
2647 }
2648 if (load_segment(&e1, &e2, tss_selector) != 0) {
77b2bc2c 2649 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 2650 }
eaa728ee
FB
2651 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2652 /* NOTE: we check both segment and busy TSS */
20054ef0 2653 if (type != 3) {
77b2bc2c 2654 raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
20054ef0 2655 }
eaa728ee
FB
2656 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2657 } else {
2658 helper_ret_protected(shift, 1, 0);
2659 }
db620f46 2660 env->hflags2 &= ~HF2_NMI_MASK;
eaa728ee
FB
2661}
2662
2663void helper_lret_protected(int shift, int addend)
2664{
2665 helper_ret_protected(shift, 0, addend);
eaa728ee
FB
2666}
2667
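/*
 * SYSENTER/SYSEXIT use flat segments derived from IA32_SYSENTER_CS:
 * SYSENTER enters CPL 0 with CS = sysenter_cs and SS = sysenter_cs + 8
 * (a 64-bit CS in long mode) and loads ESP/EIP from the SYSENTER_ESP/EIP
 * MSRs; SYSEXIT returns to CPL 3 with CS/SS at sysenter_cs + 16/+24
 * (or +32/+40 for a 64-bit return), taking the new ESP from ECX and EIP
 * from EDX.  A zero IA32_SYSENTER_CS makes both raise #GP, and SYSEXIT
 * additionally faults outside CPL 0.
 */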
2668void helper_sysenter(void)
2669{
2670 if (env->sysenter_cs == 0) {
77b2bc2c 2671 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee
FB
2672 }
2673 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2674 cpu_x86_set_cpl(env, 0);
2436b61a
AZ
2675
2676#ifdef TARGET_X86_64
2677 if (env->hflags & HF_LMA_MASK) {
2678 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2679 0, 0xffffffff,
2680 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2681 DESC_S_MASK |
20054ef0
BS
2682 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2683 DESC_L_MASK);
2436b61a
AZ
2684 } else
2685#endif
2686 {
2687 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2688 0, 0xffffffff,
2689 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2690 DESC_S_MASK |
2691 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2692 }
eaa728ee
FB
2693 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2694 0, 0xffffffff,
2695 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2696 DESC_S_MASK |
2697 DESC_W_MASK | DESC_A_MASK);
2698 ESP = env->sysenter_esp;
2699 EIP = env->sysenter_eip;
2700}
2701
2436b61a 2702void helper_sysexit(int dflag)
eaa728ee
FB
2703{
2704 int cpl;
2705
2706 cpl = env->hflags & HF_CPL_MASK;
2707 if (env->sysenter_cs == 0 || cpl != 0) {
77b2bc2c 2708 raise_exception_err(env, EXCP0D_GPF, 0);
eaa728ee
FB
2709 }
2710 cpu_x86_set_cpl(env, 3);
2436b61a
AZ
2711#ifdef TARGET_X86_64
2712 if (dflag == 2) {
20054ef0
BS
2713 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
2714 3, 0, 0xffffffff,
2436b61a
AZ
2715 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2716 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
20054ef0
BS
2717 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
2718 DESC_L_MASK);
2719 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
2720 3, 0, 0xffffffff,
2436b61a
AZ
2721 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2722 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2723 DESC_W_MASK | DESC_A_MASK);
2724 } else
2725#endif
2726 {
20054ef0
BS
2727 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
2728 3, 0, 0xffffffff,
2436b61a
AZ
2729 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2730 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2731 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
20054ef0
BS
2732 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
2733 3, 0, 0xffffffff,
2436b61a
AZ
2734 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2735 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2736 DESC_W_MASK | DESC_A_MASK);
2737 }
eaa728ee
FB
2738 ESP = ECX;
2739 EIP = EDX;
eaa728ee
FB
2740}
2741
872929aa
FB
2742#if defined(CONFIG_USER_ONLY)
2743target_ulong helper_read_crN(int reg)
eaa728ee 2744{
872929aa
FB
2745 return 0;
2746}
2747
2748void helper_write_crN(int reg, target_ulong t0)
2749{
2750}
01df040b
AL
2751
2752void helper_movl_drN_T0(int reg, target_ulong t0)
2753{
2754}
872929aa
FB
2755#else
2756target_ulong helper_read_crN(int reg)
2757{
2758 target_ulong val;
2759
6bada5e8 2760 cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
20054ef0 2761 switch (reg) {
872929aa
FB
2762 default:
2763 val = env->cr[reg];
2764 break;
2765 case 8:
db620f46 2766 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 2767 val = cpu_get_apic_tpr(env->apic_state);
db620f46
FB
2768 } else {
2769 val = env->v_tpr;
2770 }
872929aa
FB
2771 break;
2772 }
2773 return val;
2774}
2775
2776void helper_write_crN(int reg, target_ulong t0)
2777{
6bada5e8 2778 cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
20054ef0 2779 switch (reg) {
eaa728ee
FB
2780 case 0:
2781 cpu_x86_update_cr0(env, t0);
2782 break;
2783 case 3:
2784 cpu_x86_update_cr3(env, t0);
2785 break;
2786 case 4:
2787 cpu_x86_update_cr4(env, t0);
2788 break;
2789 case 8:
db620f46 2790 if (!(env->hflags2 & HF2_VINTR_MASK)) {
4a942cea 2791 cpu_set_apic_tpr(env->apic_state, t0);
db620f46
FB
2792 }
2793 env->v_tpr = t0 & 0x0f;
eaa728ee
FB
2794 break;
2795 default:
2796 env->cr[reg] = t0;
2797 break;
2798 }
eaa728ee 2799}
01df040b
AL
2800
2801void helper_movl_drN_T0(int reg, target_ulong t0)
2802{
2803 int i;
2804
2805 if (reg < 4) {
2806 hw_breakpoint_remove(env, reg);
2807 env->dr[reg] = t0;
2808 hw_breakpoint_insert(env, reg);
2809 } else if (reg == 7) {
20054ef0 2810 for (i = 0; i < 4; i++) {
01df040b 2811 hw_breakpoint_remove(env, i);
20054ef0 2812 }
01df040b 2813 env->dr[7] = t0;
20054ef0 2814 for (i = 0; i < 4; i++) {
01df040b 2815 hw_breakpoint_insert(env, i);
20054ef0
BS
2816 }
2817 } else {
01df040b 2818 env->dr[reg] = t0;
20054ef0 2819 }
01df040b 2820}
872929aa 2821#endif
eaa728ee
FB
2822
2823void helper_lmsw(target_ulong t0)
2824{
 2825 /* only the 4 lower bits of CR0 are modified. PE cannot be cleared
 2826 once it has been set to one. */
2827 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
872929aa 2828 helper_write_crN(0, t0);
eaa728ee
FB
2829}
2830
eaa728ee
FB
2831void helper_invlpg(target_ulong addr)
2832{
6bada5e8 2833 cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
914178d3 2834 tlb_flush_page(env, addr);
eaa728ee
FB
2835}
2836
2837void helper_rdtsc(void)
2838{
2839 uint64_t val;
2840
2841 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
77b2bc2c 2842 raise_exception(env, EXCP0D_GPF);
eaa728ee 2843 }
6bada5e8 2844 cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);
872929aa 2845
33c263df 2846 val = cpu_get_tsc(env) + env->tsc_offset;
eaa728ee
FB
2847 EAX = (uint32_t)(val);
2848 EDX = (uint32_t)(val >> 32);
2849}
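/*
 * Illustrative sketch, not part of the original file: what a guest
 * typically executes to reach helper_rdtsc under TCG.  GCC-style inline
 * assembly, x86 guests only; the function name is invented here.
 */
static inline uint64_t guest_rdtsc(void)
{
    uint32_t lo, hi;

    __asm__ volatile("rdtsc" : "=a"(lo), "=d"(hi));
    return ((uint64_t)hi << 32) | lo;
}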
2850
1b050077
AP
2851void helper_rdtscp(void)
2852{
2853 helper_rdtsc();
2854 ECX = (uint32_t)(env->tsc_aux);
2855}
2856
eaa728ee
FB
2857void helper_rdpmc(void)
2858{
2859 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
77b2bc2c 2860 raise_exception(env, EXCP0D_GPF);
eaa728ee 2861 }
6bada5e8 2862 cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);
20054ef0 2863
eaa728ee 2864 /* currently unimplemented */
71547a3b 2865 qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
77b2bc2c 2866 raise_exception_err(env, EXCP06_ILLOP, 0);
eaa728ee
FB
2867}
2868
2869#if defined(CONFIG_USER_ONLY)
2870void helper_wrmsr(void)
2871{
2872}
2873
2874void helper_rdmsr(void)
2875{
2876}
2877#else
2878void helper_wrmsr(void)
2879{
2880 uint64_t val;
2881
6bada5e8 2882 cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);
872929aa 2883
eaa728ee
FB
2884 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2885
20054ef0 2886 switch ((uint32_t)ECX) {
eaa728ee
FB
2887 case MSR_IA32_SYSENTER_CS:
2888 env->sysenter_cs = val & 0xffff;
2889 break;
2890 case MSR_IA32_SYSENTER_ESP:
2891 env->sysenter_esp = val;
2892 break;
2893 case MSR_IA32_SYSENTER_EIP:
2894 env->sysenter_eip = val;
2895 break;
2896 case MSR_IA32_APICBASE:
4a942cea 2897 cpu_set_apic_base(env->apic_state, val);
eaa728ee
FB
2898 break;
2899 case MSR_EFER:
2900 {
2901 uint64_t update_mask;
20054ef0 2902
eaa728ee 2903 update_mask = 0;
20054ef0 2904 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
eaa728ee 2905 update_mask |= MSR_EFER_SCE;
20054ef0
BS
2906 }
2907 if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
eaa728ee 2908 update_mask |= MSR_EFER_LME;
20054ef0
BS
2909 }
2910 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
eaa728ee 2911 update_mask |= MSR_EFER_FFXSR;
20054ef0
BS
2912 }
2913 if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
eaa728ee 2914 update_mask |= MSR_EFER_NXE;
20054ef0
BS
2915 }
2916 if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
5efc27bb 2917 update_mask |= MSR_EFER_SVME;
20054ef0
BS
2918 }
2919 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
eef26553 2920 update_mask |= MSR_EFER_FFXSR;
20054ef0 2921 }
5efc27bb
FB
2922 cpu_load_efer(env, (env->efer & ~update_mask) |
2923 (val & update_mask));
eaa728ee
FB
2924 }
2925 break;
2926 case MSR_STAR:
2927 env->star = val;
2928 break;
2929 case MSR_PAT:
2930 env->pat = val;
2931 break;
2932 case MSR_VM_HSAVE_PA:
2933 env->vm_hsave = val;
2934 break;
2935#ifdef TARGET_X86_64
2936 case MSR_LSTAR:
2937 env->lstar = val;
2938 break;
2939 case MSR_CSTAR:
2940 env->cstar = val;
2941 break;
2942 case MSR_FMASK:
2943 env->fmask = val;
2944 break;
2945 case MSR_FSBASE:
2946 env->segs[R_FS].base = val;
2947 break;
2948 case MSR_GSBASE:
2949 env->segs[R_GS].base = val;
2950 break;
2951 case MSR_KERNELGSBASE:
2952 env->kernelgsbase = val;
2953 break;
2954#endif
165d9b82
AL
2955 case MSR_MTRRphysBase(0):
2956 case MSR_MTRRphysBase(1):
2957 case MSR_MTRRphysBase(2):
2958 case MSR_MTRRphysBase(3):
2959 case MSR_MTRRphysBase(4):
2960 case MSR_MTRRphysBase(5):
2961 case MSR_MTRRphysBase(6):
2962 case MSR_MTRRphysBase(7):
2963 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
2964 break;
2965 case MSR_MTRRphysMask(0):
2966 case MSR_MTRRphysMask(1):
2967 case MSR_MTRRphysMask(2):
2968 case MSR_MTRRphysMask(3):
2969 case MSR_MTRRphysMask(4):
2970 case MSR_MTRRphysMask(5):
2971 case MSR_MTRRphysMask(6):
2972 case MSR_MTRRphysMask(7):
2973 env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
2974 break;
2975 case MSR_MTRRfix64K_00000:
2976 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
2977 break;
2978 case MSR_MTRRfix16K_80000:
2979 case MSR_MTRRfix16K_A0000:
2980 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
2981 break;
2982 case MSR_MTRRfix4K_C0000:
2983 case MSR_MTRRfix4K_C8000:
2984 case MSR_MTRRfix4K_D0000:
2985 case MSR_MTRRfix4K_D8000:
2986 case MSR_MTRRfix4K_E0000:
2987 case MSR_MTRRfix4K_E8000:
2988 case MSR_MTRRfix4K_F0000:
2989 case MSR_MTRRfix4K_F8000:
2990 env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
2991 break;
2992 case MSR_MTRRdefType:
2993 env->mtrr_deftype = val;
2994 break;
79c4f6b0
HY
2995 case MSR_MCG_STATUS:
2996 env->mcg_status = val;
2997 break;
2998 case MSR_MCG_CTL:
2999 if ((env->mcg_cap & MCG_CTL_P)
20054ef0 3000 && (val == 0 || val == ~(uint64_t)0)) {
79c4f6b0 3001 env->mcg_ctl = val;
20054ef0 3002 }
79c4f6b0 3003 break;
1b050077
AP
3004 case MSR_TSC_AUX:
3005 env->tsc_aux = val;
3006 break;
21e87c46
AK
3007 case MSR_IA32_MISC_ENABLE:
3008 env->msr_ia32_misc_enable = val;
3009 break;
eaa728ee 3010 default:
79c4f6b0
HY
3011 if ((uint32_t)ECX >= MSR_MC0_CTL
3012 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3013 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3014 if ((offset & 0x3) != 0
20054ef0 3015 || (val == 0 || val == ~(uint64_t)0)) {
79c4f6b0 3016 env->mce_banks[offset] = val;
20054ef0 3017 }
79c4f6b0
HY
3018 break;
3019 }
20054ef0 3020 /* XXX: exception? */
eaa728ee
FB
3021 break;
3022 }
3023}
3024
3025void helper_rdmsr(void)
3026{
3027 uint64_t val;
872929aa 3028
6bada5e8 3029 cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);
872929aa 3030
20054ef0 3031 switch ((uint32_t)ECX) {
eaa728ee
FB
3032 case MSR_IA32_SYSENTER_CS:
3033 val = env->sysenter_cs;
3034 break;
3035 case MSR_IA32_SYSENTER_ESP:
3036 val = env->sysenter_esp;
3037 break;
3038 case MSR_IA32_SYSENTER_EIP:
3039 val = env->sysenter_eip;
3040 break;
3041 case MSR_IA32_APICBASE:
4a942cea 3042 val = cpu_get_apic_base(env->apic_state);
eaa728ee
FB
3043 break;
3044 case MSR_EFER:
3045 val = env->efer;
3046 break;
3047 case MSR_STAR:
3048 val = env->star;
3049 break;
3050 case MSR_PAT:
3051 val = env->pat;
3052 break;
3053 case MSR_VM_HSAVE_PA:
3054 val = env->vm_hsave;
3055 break;
d5e49a81
AZ
3056 case MSR_IA32_PERF_STATUS:
3057 /* tsc_increment_by_tick */
3058 val = 1000ULL;
3059 /* CPU multiplier */
3060 val |= (((uint64_t)4ULL) << 40);
3061 break;
eaa728ee
FB
3062#ifdef TARGET_X86_64
3063 case MSR_LSTAR:
3064 val = env->lstar;
3065 break;
3066 case MSR_CSTAR:
3067 val = env->cstar;
3068 break;
3069 case MSR_FMASK:
3070 val = env->fmask;
3071 break;
3072 case MSR_FSBASE:
3073 val = env->segs[R_FS].base;
3074 break;
3075 case MSR_GSBASE:
3076 val = env->segs[R_GS].base;
3077 break;
3078 case MSR_KERNELGSBASE:
3079 val = env->kernelgsbase;
3080 break;
1b050077
AP
3081 case MSR_TSC_AUX:
3082 val = env->tsc_aux;
3083 break;
eaa728ee 3084#endif
165d9b82
AL
3085 case MSR_MTRRphysBase(0):
3086 case MSR_MTRRphysBase(1):
3087 case MSR_MTRRphysBase(2):
3088 case MSR_MTRRphysBase(3):
3089 case MSR_MTRRphysBase(4):
3090 case MSR_MTRRphysBase(5):
3091 case MSR_MTRRphysBase(6):
3092 case MSR_MTRRphysBase(7):
3093 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3094 break;
3095 case MSR_MTRRphysMask(0):
3096 case MSR_MTRRphysMask(1):
3097 case MSR_MTRRphysMask(2):
3098 case MSR_MTRRphysMask(3):
3099 case MSR_MTRRphysMask(4):
3100 case MSR_MTRRphysMask(5):
3101 case MSR_MTRRphysMask(6):
3102 case MSR_MTRRphysMask(7):
3103 val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3104 break;
3105 case MSR_MTRRfix64K_00000:
3106 val = env->mtrr_fixed[0];
3107 break;
3108 case MSR_MTRRfix16K_80000:
3109 case MSR_MTRRfix16K_A0000:
3110 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3111 break;
3112 case MSR_MTRRfix4K_C0000:
3113 case MSR_MTRRfix4K_C8000:
3114 case MSR_MTRRfix4K_D0000:
3115 case MSR_MTRRfix4K_D8000:
3116 case MSR_MTRRfix4K_E0000:
3117 case MSR_MTRRfix4K_E8000:
3118 case MSR_MTRRfix4K_F0000:
3119 case MSR_MTRRfix4K_F8000:
3120 val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3121 break;
3122 case MSR_MTRRdefType:
3123 val = env->mtrr_deftype;
3124 break;
dd5e3b17 3125 case MSR_MTRRcap:
20054ef0
BS
3126 if (env->cpuid_features & CPUID_MTRR) {
3127 val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
3128 MSR_MTRRcap_WC_SUPPORTED;
3129 } else {
3130 /* XXX: exception? */
dd5e3b17 3131 val = 0;
20054ef0 3132 }
dd5e3b17 3133 break;
79c4f6b0
HY
3134 case MSR_MCG_CAP:
3135 val = env->mcg_cap;
3136 break;
3137 case MSR_MCG_CTL:
20054ef0 3138 if (env->mcg_cap & MCG_CTL_P) {
79c4f6b0 3139 val = env->mcg_ctl;
20054ef0 3140 } else {
79c4f6b0 3141 val = 0;
20054ef0 3142 }
79c4f6b0
HY
3143 break;
3144 case MSR_MCG_STATUS:
3145 val = env->mcg_status;
3146 break;
21e87c46
AK
3147 case MSR_IA32_MISC_ENABLE:
3148 val = env->msr_ia32_misc_enable;
3149 break;
eaa728ee 3150 default:
79c4f6b0
HY
3151 if ((uint32_t)ECX >= MSR_MC0_CTL
3152 && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3153 uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3154 val = env->mce_banks[offset];
3155 break;
3156 }
20054ef0 3157 /* XXX: exception? */
eaa728ee
FB
3158 val = 0;
3159 break;
3160 }
3161 EAX = (uint32_t)(val);
3162 EDX = (uint32_t)(val >> 32);
3163}
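/*
 * Illustrative sketch, not part of the original file: the guest-side view
 * of the ECX/EDX:EAX convention decoded by helper_rdmsr above.  GCC-style
 * inline assembly, x86 only, and it must run at CPL 0 (e.g. in a guest
 * kernel); the function name is invented here.
 */
static inline uint64_t guest_rdmsr(uint32_t msr)
{
    uint32_t lo, hi;

    __asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
    return ((uint64_t)hi << 32) | lo;
}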
3164#endif
3165
3166target_ulong helper_lsl(target_ulong selector1)
3167{
3168 unsigned int limit;
3169 uint32_t e1, e2, eflags, selector;
3170 int rpl, dpl, cpl, type;
3171
3172 selector = selector1 & 0xffff;
a7812ae4 3173 eflags = helper_cc_compute_all(CC_OP);
20054ef0 3174 if ((selector & 0xfffc) == 0) {
dc1ded53 3175 goto fail;
20054ef0
BS
3176 }
3177 if (load_segment(&e1, &e2, selector) != 0) {
eaa728ee 3178 goto fail;
20054ef0 3179 }
eaa728ee
FB
3180 rpl = selector & 3;
3181 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3182 cpl = env->hflags & HF_CPL_MASK;
3183 if (e2 & DESC_S_MASK) {
3184 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3185 /* conforming */
3186 } else {
20054ef0 3187 if (dpl < cpl || dpl < rpl) {
eaa728ee 3188 goto fail;
20054ef0 3189 }
eaa728ee
FB
3190 }
3191 } else {
3192 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 3193 switch (type) {
eaa728ee
FB
3194 case 1:
3195 case 2:
3196 case 3:
3197 case 9:
3198 case 11:
3199 break;
3200 default:
3201 goto fail;
3202 }
3203 if (dpl < cpl || dpl < rpl) {
3204 fail:
3205 CC_SRC = eflags & ~CC_Z;
3206 return 0;
3207 }
3208 }
3209 limit = get_seg_limit(e1, e2);
3210 CC_SRC = eflags | CC_Z;
3211 return limit;
3212}
3213
3214target_ulong helper_lar(target_ulong selector1)
3215{
3216 uint32_t e1, e2, eflags, selector;
3217 int rpl, dpl, cpl, type;
3218
3219 selector = selector1 & 0xffff;
a7812ae4 3220 eflags = helper_cc_compute_all(CC_OP);
20054ef0 3221 if ((selector & 0xfffc) == 0) {
eaa728ee 3222 goto fail;
20054ef0
BS
3223 }
3224 if (load_segment(&e1, &e2, selector) != 0) {
eaa728ee 3225 goto fail;
20054ef0 3226 }
eaa728ee
FB
3227 rpl = selector & 3;
3228 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3229 cpl = env->hflags & HF_CPL_MASK;
3230 if (e2 & DESC_S_MASK) {
3231 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3232 /* conforming */
3233 } else {
20054ef0 3234 if (dpl < cpl || dpl < rpl) {
eaa728ee 3235 goto fail;
20054ef0 3236 }
eaa728ee
FB
3237 }
3238 } else {
3239 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
20054ef0 3240 switch (type) {
eaa728ee
FB
3241 case 1:
3242 case 2:
3243 case 3:
3244 case 4:
3245 case 5:
3246 case 9:
3247 case 11:
3248 case 12:
3249 break;
3250 default:
3251 goto fail;
3252 }
3253 if (dpl < cpl || dpl < rpl) {
3254 fail:
3255 CC_SRC = eflags & ~CC_Z;
3256 return 0;
3257 }
3258 }
3259 CC_SRC = eflags | CC_Z;
3260 return e2 & 0x00f0ff00;
3261}
3262
3263void helper_verr(target_ulong selector1)
3264{
3265 uint32_t e1, e2, eflags, selector;
3266 int rpl, dpl, cpl;
3267
3268 selector = selector1 & 0xffff;
a7812ae4 3269 eflags = helper_cc_compute_all(CC_OP);
20054ef0 3270 if ((selector & 0xfffc) == 0) {
eaa728ee 3271 goto fail;
20054ef0
BS
3272 }
3273 if (load_segment(&e1, &e2, selector) != 0) {
eaa728ee 3274 goto fail;
20054ef0
BS
3275 }
3276 if (!(e2 & DESC_S_MASK)) {
eaa728ee 3277 goto fail;
20054ef0 3278 }
eaa728ee
FB
3279 rpl = selector & 3;
3280 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3281 cpl = env->hflags & HF_CPL_MASK;
3282 if (e2 & DESC_CS_MASK) {
20054ef0 3283 if (!(e2 & DESC_R_MASK)) {
eaa728ee 3284 goto fail;
20054ef0 3285 }
eaa728ee 3286 if (!(e2 & DESC_C_MASK)) {
20054ef0 3287 if (dpl < cpl || dpl < rpl) {
eaa728ee 3288 goto fail;
20054ef0 3289 }
eaa728ee
FB
3290 }
3291 } else {
3292 if (dpl < cpl || dpl < rpl) {
3293 fail:
3294 CC_SRC = eflags & ~CC_Z;
3295 return;
3296 }
3297 }
3298 CC_SRC = eflags | CC_Z;
3299}
3300
3301void helper_verw(target_ulong selector1)
3302{
3303 uint32_t e1, e2, eflags, selector;
3304 int rpl, dpl, cpl;
3305
3306 selector = selector1 & 0xffff;
a7812ae4 3307 eflags = helper_cc_compute_all(CC_OP);
20054ef0 3308 if ((selector & 0xfffc) == 0) {
eaa728ee 3309 goto fail;
20054ef0
BS
3310 }
3311 if (load_segment(&e1, &e2, selector) != 0) {
eaa728ee 3312 goto fail;
20054ef0
BS
3313 }
3314 if (!(e2 & DESC_S_MASK)) {
eaa728ee 3315 goto fail;
20054ef0 3316 }
eaa728ee
FB
3317 rpl = selector & 3;
3318 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3319 cpl = env->hflags & HF_CPL_MASK;
3320 if (e2 & DESC_CS_MASK) {
3321 goto fail;
3322 } else {
20054ef0 3323 if (dpl < cpl || dpl < rpl) {
eaa728ee 3324 goto fail;
20054ef0 3325 }
eaa728ee
FB
3326 if (!(e2 & DESC_W_MASK)) {
3327 fail:
3328 CC_SRC = eflags & ~CC_Z;
3329 return;
3330 }
3331 }
3332 CC_SRC = eflags | CC_Z;
3333}
3334
f299f437
BS
3335#if defined(CONFIG_USER_ONLY)
3336void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
eaa728ee 3337{
f299f437 3338 CPUX86State *saved_env;
eaa728ee 3339
f299f437
BS
3340 saved_env = env;
3341 env = s;
3342 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
3343 selector &= 0xffff;
3344 cpu_x86_load_seg_cache(env, seg_reg, selector,
3345 (selector << 4), 0xffff, 0);
3346 } else {
3347 helper_load_seg(seg_reg, selector);
13822781 3348 }
f299f437 3349 env = saved_env;
eaa728ee 3350}
eaa728ee 3351#endif
20054ef0 3352
f299f437 3353static void do_hlt(void)
eaa728ee 3354{
f299f437
BS
3355 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
3356 env->halted = 1;
3357 env->exception_index = EXCP_HLT;
3358 cpu_loop_exit(env);
eaa728ee
FB
3359}
3360
f299f437 3361void helper_hlt(int next_eip_addend)
eaa728ee 3362{
6bada5e8 3363 cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
f299f437 3364 EIP += next_eip_addend;
20054ef0 3365
f299f437 3366 do_hlt();
eaa728ee
FB
3367}
3368
f299f437 3369void helper_monitor(target_ulong ptr)
eaa728ee 3370{
f299f437
BS
3371 if ((uint32_t)ECX != 0) {
3372 raise_exception(env, EXCP0D_GPF);
20054ef0 3373 }
f299f437 3374 /* XXX: store address? */
6bada5e8 3375 cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
eaa728ee
FB
3376}
3377
f299f437 3378void helper_mwait(int next_eip_addend)
eaa728ee 3379{
f299f437
BS
3380 if ((uint32_t)ECX != 0) {
3381 raise_exception(env, EXCP0D_GPF);
3382 }
6bada5e8 3383 cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
f299f437 3384 EIP += next_eip_addend;
20054ef0 3385
f299f437
BS
3386 /* XXX: not complete but not completely erroneous */
3387 if (env->cpu_index != 0 || env->next_cpu != NULL) {
3388 /* more than one CPU: do not sleep because another CPU may
3389 wake this one */
3390 } else {
3391 do_hlt();
3392 }
eaa728ee
FB
3393}
3394
f299f437 3395void helper_debug(void)
eaa728ee 3396{
f299f437
BS
3397 env->exception_index = EXCP_DEBUG;
3398 cpu_loop_exit(env);
eaa728ee
FB
3399}
3400
f299f437 3401void helper_boundw(target_ulong a0, int v)
eaa728ee 3402{
f299f437 3403 int low, high;
eaa728ee 3404
f299f437
BS
3405 low = ldsw(a0);
3406 high = ldsw(a0 + 2);
3407 v = (int16_t)v;
3408 if (v < low || v > high) {
3409 raise_exception(env, EXCP05_BOUND);
3410 }
eaa728ee
FB
3411}
3412
f299f437 3413void helper_boundl(target_ulong a0, int v)
eaa728ee 3414{
f299f437 3415 int low, high;
eaa728ee 3416
f299f437
BS
3417 low = ldl(a0);
3418 high = ldl(a0 + 4);
3419 if (v < low || v > high) {
3420 raise_exception(env, EXCP05_BOUND);
3421 }
eaa728ee
FB
3422}
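/*
 * Illustrative sketch, not part of the original file: a standalone model
 * of the check helper_boundw/helper_boundl perform.  BOUND compares a
 * signed index against a pair of signed bounds read from memory and
 * raises #BR (EXCP05_BOUND) when it falls outside them; the names below
 * are invented for the example.
 */
#include <stdint.h>

/* Returns non-zero if BOUND would fault for this 32-bit operand. */
static int bound_violates32(const int32_t bounds[2], int32_t index)
{
    return index < bounds[0] || index > bounds[1];
}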
3423
eaa728ee
FB
3424#if !defined(CONFIG_USER_ONLY)
3425
3426#define MMUSUFFIX _mmu
3427
3428#define SHIFT 0
3429#include "softmmu_template.h"
3430
3431#define SHIFT 1
3432#include "softmmu_template.h"
3433
3434#define SHIFT 2
3435#include "softmmu_template.h"
3436
3437#define SHIFT 3
3438#include "softmmu_template.h"
3439
3440#endif
3441
d9957a8b 3442#if !defined(CONFIG_USER_ONLY)
eaa728ee
FB
 3443/* try to fill the TLB and raise an exception on error. If retaddr is
 3444 NULL, it means that the function was called from C code (i.e. not
 3445 from generated code or from helper.c) */
3446/* XXX: fix it to restore all registers */
317ac620 3447void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
20503968 3448 uintptr_t retaddr)
eaa728ee
FB
3449{
3450 TranslationBlock *tb;
3451 int ret;
eaa728ee
FB
3452 CPUX86State *saved_env;
3453
eaa728ee 3454 saved_env = env;
bccd9ec5 3455 env = env1;
eaa728ee 3456
97b348e7 3457 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
eaa728ee
FB
3458 if (ret) {
3459 if (retaddr) {
3460 /* now we have a real cpu fault */
20503968 3461 tb = tb_find_pc(retaddr);
eaa728ee
FB
3462 if (tb) {
3463 /* the PC is inside the translated code. It means that we have
3464 a virtual CPU fault */
20503968 3465 cpu_restore_state(tb, env, retaddr);
eaa728ee
FB
3466 }
3467 }
77b2bc2c 3468 raise_exception_err(env, env->exception_index, env->error_code);
eaa728ee
FB
3469 }
3470 env = saved_env;
3471}
d9957a8b 3472#endif