/*
 * x86 segmentation related helpers: (sysemu-only code)
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
#include "../seg_helper.h"
30 void helper_syscall(CPUX86State
*env
, int next_eip_addend
)
34 if (!(env
->efer
& MSR_EFER_SCE
)) {
35 raise_exception_err_ra(env
, EXCP06_ILLOP
, 0, GETPC());
37 selector
= (env
->star
>> 32) & 0xffff;
39 if (env
->hflags
& HF_LMA_MASK
) {
42 env
->regs
[R_ECX
] = env
->eip
+ next_eip_addend
;
43 env
->regs
[11] = cpu_compute_eflags(env
) & ~RF_MASK
;
45 code64
= env
->hflags
& HF_CS64_MASK
;
47 env
->eflags
&= ~(env
->fmask
| RF_MASK
);
48 cpu_load_eflags(env
, env
->eflags
, 0);
49 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
51 DESC_G_MASK
| DESC_P_MASK
|
53 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
55 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
57 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
59 DESC_W_MASK
| DESC_A_MASK
);
61 env
->eip
= env
->lstar
;
63 env
->eip
= env
->cstar
;
68 env
->regs
[R_ECX
] = (uint32_t)(env
->eip
+ next_eip_addend
);
70 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
71 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
73 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
75 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
76 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
78 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
80 DESC_W_MASK
| DESC_A_MASK
);
81 env
->eip
= (uint32_t)env
->star
;
85 void handle_even_inj(CPUX86State
*env
, int intno
, int is_int
,
86 int error_code
, int is_hw
, int rm
)
88 CPUState
*cs
= env_cpu(env
);
89 uint32_t event_inj
= x86_ldl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
92 if (!(event_inj
& SVM_EVTINJ_VALID
)) {
96 type
= SVM_EVTINJ_TYPE_SOFT
;
98 type
= SVM_EVTINJ_TYPE_EXEPT
;
100 event_inj
= intno
| type
| SVM_EVTINJ_VALID
;
101 if (!rm
&& exception_has_error_code(intno
)) {
102 event_inj
|= SVM_EVTINJ_VALID_ERR
;
103 x86_stl_phys(cs
, env
->vm_vmcb
+ offsetof(struct vmcb
,
104 control
.event_inj_err
),
108 env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
),
113 void x86_cpu_do_interrupt(CPUState
*cs
)
115 X86CPU
*cpu
= X86_CPU(cs
);
116 CPUX86State
*env
= &cpu
->env
;
118 if (cs
->exception_index
== EXCP_VMEXIT
) {
119 assert(env
->old_exception
== -1);
122 do_interrupt_all(cpu
, cs
->exception_index
,
123 env
->exception_is_int
,
125 env
->exception_next_eip
, 0);
126 /* successfully delivered */
127 env
->old_exception
= -1;
131 void x86_cpu_exec_halt(CPUState
*cpu
)
133 if (cpu
->interrupt_request
& CPU_INTERRUPT_POLL
) {
134 X86CPU
*x86_cpu
= X86_CPU(cpu
);
137 apic_poll_irq(x86_cpu
->apic_state
);
138 cpu_reset_interrupt(cpu
, CPU_INTERRUPT_POLL
);
143 bool x86_need_replay_interrupt(int interrupt_request
)
146 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
147 * "real" interrupt event later. It does not need to be recorded for
150 return !(interrupt_request
& CPU_INTERRUPT_POLL
);
153 bool x86_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
155 X86CPU
*cpu
= X86_CPU(cs
);
156 CPUX86State
*env
= &cpu
->env
;
159 interrupt_request
= x86_cpu_pending_interrupt(cs
, interrupt_request
);
160 if (!interrupt_request
) {
164 /* Don't process multiple interrupt requests in a single call.
165 * This is required to make icount-driven execution deterministic.
167 switch (interrupt_request
) {
168 case CPU_INTERRUPT_POLL
:
169 cs
->interrupt_request
&= ~CPU_INTERRUPT_POLL
;
170 apic_poll_irq(cpu
->apic_state
);
172 case CPU_INTERRUPT_SIPI
:
175 case CPU_INTERRUPT_SMI
:
176 cpu_svm_check_intercept_param(env
, SVM_EXIT_SMI
, 0, 0);
177 cs
->interrupt_request
&= ~CPU_INTERRUPT_SMI
;
180 case CPU_INTERRUPT_NMI
:
181 cpu_svm_check_intercept_param(env
, SVM_EXIT_NMI
, 0, 0);
182 cs
->interrupt_request
&= ~CPU_INTERRUPT_NMI
;
183 env
->hflags2
|= HF2_NMI_MASK
;
184 do_interrupt_x86_hardirq(env
, EXCP02_NMI
, 1);
186 case CPU_INTERRUPT_MCE
:
187 cs
->interrupt_request
&= ~CPU_INTERRUPT_MCE
;
188 do_interrupt_x86_hardirq(env
, EXCP12_MCHK
, 0);
190 case CPU_INTERRUPT_HARD
:
191 cpu_svm_check_intercept_param(env
, SVM_EXIT_INTR
, 0, 0);
192 cs
->interrupt_request
&= ~(CPU_INTERRUPT_HARD
|
194 intno
= cpu_get_pic_interrupt(env
);
195 qemu_log_mask(CPU_LOG_INT
,
196 "Servicing hardware INT=0x%02x\n", intno
);
197 do_interrupt_x86_hardirq(env
, intno
, 1);
199 case CPU_INTERRUPT_VIRQ
:
200 cpu_svm_check_intercept_param(env
, SVM_EXIT_VINTR
, 0, 0);
201 intno
= x86_ldl_phys(cs
, env
->vm_vmcb
202 + offsetof(struct vmcb
, control
.int_vector
));
203 qemu_log_mask(CPU_LOG_INT
,
204 "Servicing virtual hardware INT=0x%02x\n", intno
);
205 do_interrupt_x86_hardirq(env
, intno
, 1);
206 cs
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
207 env
->int_ctl
&= ~V_IRQ_MASK
;
211 /* Ensure that no TB jump will be modified as the program flow was changed. */
215 /* check if Port I/O is allowed in TSS */
216 void helper_check_io(CPUX86State
*env
, uint32_t addr
, uint32_t size
)
218 uintptr_t retaddr
= GETPC();
219 uint32_t io_offset
, val
, mask
;
221 /* TSS must be a valid 32 bit one */
222 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
223 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
224 env
->tr
.limit
< 103) {
227 io_offset
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ 0x66, retaddr
);
228 io_offset
+= (addr
>> 3);
229 /* Note: the check needs two bytes */
230 if ((io_offset
+ 1) > env
->tr
.limit
) {
233 val
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ io_offset
, retaddr
);
235 mask
= (1 << size
) - 1;
236 /* all bits must be zero to allow the I/O */
237 if ((val
& mask
) != 0) {
239 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);