/*
 * x86 segmentation related helpers: (sysemu-only code)
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
#include "../seg_helper.h"

30void helper_syscall(CPUX86State *env, int next_eip_addend)
31{
32 int selector;
33
34 if (!(env->efer & MSR_EFER_SCE)) {
35 raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
36 }
37 selector = (env->star >> 32) & 0xffff;
63fd8ef0 38#ifdef TARGET_X86_64
30493a03
CF
39 if (env->hflags & HF_LMA_MASK) {
40 int code64;
41
42 env->regs[R_ECX] = env->eip + next_eip_addend;
43 env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;
44
45 code64 = env->hflags & HF_CS64_MASK;
46
47 env->eflags &= ~(env->fmask | RF_MASK);
48 cpu_load_eflags(env, env->eflags, 0);
49 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
50 0, 0xffffffff,
51 DESC_G_MASK | DESC_P_MASK |
52 DESC_S_MASK |
53 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
54 DESC_L_MASK);
55 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
56 0, 0xffffffff,
57 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
58 DESC_S_MASK |
59 DESC_W_MASK | DESC_A_MASK);
60 if (code64) {
61 env->eip = env->lstar;
62 } else {
63 env->eip = env->cstar;
64 }
63fd8ef0
PB
65 } else
66#endif
67 {
30493a03
CF
68 env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
69
70 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
71 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
72 0, 0xffffffff,
73 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
74 DESC_S_MASK |
75 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
76 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
77 0, 0xffffffff,
78 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
79 DESC_S_MASK |
80 DESC_W_MASK | DESC_A_MASK);
81 env->eip = (uint32_t)env->star;
82 }
83}
85void handle_even_inj(CPUX86State *env, int intno, int is_int,
86 int error_code, int is_hw, int rm)
87{
88 CPUState *cs = env_cpu(env);
89 uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
90 control.event_inj));
91
92 if (!(event_inj & SVM_EVTINJ_VALID)) {
93 int type;
94
95 if (is_int) {
96 type = SVM_EVTINJ_TYPE_SOFT;
97 } else {
98 type = SVM_EVTINJ_TYPE_EXEPT;
99 }
100 event_inj = intno | type | SVM_EVTINJ_VALID;
101 if (!rm && exception_has_error_code(intno)) {
102 event_inj |= SVM_EVTINJ_VALID_ERR;
103 x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
104 control.event_inj_err),
105 error_code);
106 }
107 x86_stl_phys(cs,
108 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
109 event_inj);
110 }
111}
113void x86_cpu_do_interrupt(CPUState *cs)
114{
115 X86CPU *cpu = X86_CPU(cs);
116 CPUX86State *env = &cpu->env;
117
118 if (cs->exception_index == EXCP_VMEXIT) {
119 assert(env->old_exception == -1);
120 do_vmexit(env);
121 } else {
122 do_interrupt_all(cpu, cs->exception_index,
123 env->exception_is_int,
124 env->error_code,
125 env->exception_next_eip, 0);
126 /* successfully delivered */
127 env->old_exception = -1;
128 }
129}
131void x86_cpu_exec_halt(CPUState *cpu)
132{
133 if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
134 X86CPU *x86_cpu = X86_CPU(cpu);
135
136 bql_lock();
137 apic_poll_irq(x86_cpu->apic_state);
138 cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
139 bql_unlock();
140 }
141}
143bool x86_need_replay_interrupt(int interrupt_request)
144{
145 /*
146 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
147 * "real" interrupt event later. It does not need to be recorded for
148 * replay purposes.
149 */
150 return !(interrupt_request & CPU_INTERRUPT_POLL);
151}
153bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
154{
155 X86CPU *cpu = X86_CPU(cs);
156 CPUX86State *env = &cpu->env;
157 int intno;
158
159 interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
160 if (!interrupt_request) {
161 return false;
162 }
163
164 /* Don't process multiple interrupt requests in a single call.
165 * This is required to make icount-driven execution deterministic.
166 */
167 switch (interrupt_request) {
168 case CPU_INTERRUPT_POLL:
169 cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
170 apic_poll_irq(cpu->apic_state);
171 break;
172 case CPU_INTERRUPT_SIPI:
173 do_cpu_sipi(cpu);
174 break;
175 case CPU_INTERRUPT_SMI:
176 cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
177 cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
178 do_smm_enter(cpu);
179 break;
180 case CPU_INTERRUPT_NMI:
181 cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
182 cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
183 env->hflags2 |= HF2_NMI_MASK;
184 do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
185 break;
186 case CPU_INTERRUPT_MCE:
187 cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
188 do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
189 break;
190 case CPU_INTERRUPT_HARD:
191 cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
192 cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
193 CPU_INTERRUPT_VIRQ);
194 intno = cpu_get_pic_interrupt(env);
346cd004 195 qemu_log_mask(CPU_LOG_INT,
0792e6c8
PMD
196 "Servicing hardware INT=0x%02x\n", intno);
197 do_interrupt_x86_hardirq(env, intno, 1);
198 break;
199 case CPU_INTERRUPT_VIRQ:
200 cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
201 intno = x86_ldl_phys(cs, env->vm_vmcb
202 + offsetof(struct vmcb, control.int_vector));
346cd004 203 qemu_log_mask(CPU_LOG_INT,
0792e6c8
PMD
204 "Servicing virtual hardware INT=0x%02x\n", intno);
205 do_interrupt_x86_hardirq(env, intno, 1);
206 cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
207 env->int_ctl &= ~V_IRQ_MASK;
208 break;
209 }
210
211 /* Ensure that no TB jump will be modified as the program flow was changed. */
212 return true;
213}
215/* check if Port I/O is allowed in TSS */
216void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
217{
218 uintptr_t retaddr = GETPC();
219 uint32_t io_offset, val, mask;
220
221 /* TSS must be a valid 32 bit one */
222 if (!(env->tr.flags & DESC_P_MASK) ||
223 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
224 env->tr.limit < 103) {
225 goto fail;
226 }
227 io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
228 io_offset += (addr >> 3);
229 /* Note: the check needs two bytes */
230 if ((io_offset + 1) > env->tr.limit) {
231 goto fail;
232 }
233 val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
234 val >>= (addr & 7);
235 mask = (1 << size) - 1;
236 /* all bits must be zero to allow the I/O */
237 if ((val & mask) != 0) {
238 fail:
239 raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
240 }
241}