/*
 *  vm86 linux syscall support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include "qemu.h"

//#define DEBUG_VM86

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK      (0xDD5)
#define RETURN_MASK    (0xDFF)
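/* Flag bits the vm86 program may modify directly in the real eflags
   (SAFE_MASK) and the bits reported back to it on pushf/iret
   (RETURN_MASK).  Both exclude IF, which is virtualized through VIF
   below; the values follow the Linux kernel's vm86 implementation. */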

static inline int is_revectored(int nr, struct target_revectored_struct *bitmap)
{
    return (tswap32(bitmap->__map[nr >> 5]) >> (nr & 0x1f)) & 1;
}

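/* Access helpers for guest real-mode memory: the effective address is
   the segment base pointer plus a 16-bit offset, and values are
   converted to/from target byte order with tswap16/tswap32. */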
static inline void vm_putw(uint8_t *segptr, unsigned int reg16, unsigned int val)
{
    *(uint16_t *)(segptr + (reg16 & 0xffff)) = tswap16(val);
}

static inline void vm_putl(uint8_t *segptr, unsigned int reg16, unsigned int val)
{
    *(uint32_t *)(segptr + (reg16 & 0xffff)) = tswap32(val);
}

static inline unsigned int vm_getw(uint8_t *segptr, unsigned int reg16)
{
    return tswap16(*(uint16_t *)(segptr + (reg16 & 0xffff)));
}

static inline unsigned int vm_getl(uint8_t *segptr, unsigned int reg16)
{
    /* read a full 32-bit value (the original cast to uint16_t truncated it) */
    return tswap32(*(uint32_t *)(segptr + (reg16 & 0xffff)));
}

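/* Copy the vm86 register image back into the user-supplied structure and
   restore the 32-bit register state that was saved on entry to the vm86
   syscall, so that execution can resume in protected mode. */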
void save_v86_state(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    /* put the VM86 registers in the userspace register structure */
    ts->target_v86->regs.eax = tswap32(env->regs[R_EAX]);
    ts->target_v86->regs.ebx = tswap32(env->regs[R_EBX]);
    ts->target_v86->regs.ecx = tswap32(env->regs[R_ECX]);
    ts->target_v86->regs.edx = tswap32(env->regs[R_EDX]);
    ts->target_v86->regs.esi = tswap32(env->regs[R_ESI]);
    ts->target_v86->regs.edi = tswap32(env->regs[R_EDI]);
    ts->target_v86->regs.ebp = tswap32(env->regs[R_EBP]);
    ts->target_v86->regs.esp = tswap32(env->regs[R_ESP]);
    ts->target_v86->regs.eip = tswap32(env->eip);
    ts->target_v86->regs.cs = tswap16(env->segs[R_CS]);
    ts->target_v86->regs.ss = tswap16(env->segs[R_SS]);
    ts->target_v86->regs.ds = tswap16(env->segs[R_DS]);
    ts->target_v86->regs.es = tswap16(env->segs[R_ES]);
    ts->target_v86->regs.fs = tswap16(env->segs[R_FS]);
    ts->target_v86->regs.gs = tswap16(env->segs[R_GS]);
    set_flags(env->eflags, ts->v86flags, VIF_MASK | ts->v86mask);
    ts->target_v86->regs.eflags = tswap32(env->eflags);
#ifdef DEBUG_VM86
    fprintf(logfile, "save_v86_state: eflags=%08x cs:ip=%04x:%04x\n",
            env->eflags, env->segs[R_CS], env->eip);
#endif

    /* restore 32 bit registers */
    env->regs[R_EAX] = ts->vm86_saved_regs.eax;
    env->regs[R_EBX] = ts->vm86_saved_regs.ebx;
    env->regs[R_ECX] = ts->vm86_saved_regs.ecx;
    env->regs[R_EDX] = ts->vm86_saved_regs.edx;
    env->regs[R_ESI] = ts->vm86_saved_regs.esi;
    env->regs[R_EDI] = ts->vm86_saved_regs.edi;
    env->regs[R_EBP] = ts->vm86_saved_regs.ebp;
    env->regs[R_ESP] = ts->vm86_saved_regs.esp;
    env->eflags = ts->vm86_saved_regs.eflags;
    env->eip = ts->vm86_saved_regs.eip;

    cpu_x86_load_seg(env, R_CS, ts->vm86_saved_regs.cs);
    cpu_x86_load_seg(env, R_SS, ts->vm86_saved_regs.ss);
    cpu_x86_load_seg(env, R_DS, ts->vm86_saved_regs.ds);
    cpu_x86_load_seg(env, R_ES, ts->vm86_saved_regs.es);
    cpu_x86_load_seg(env, R_FS, ts->vm86_saved_regs.fs);
    cpu_x86_load_seg(env, R_GS, ts->vm86_saved_regs.gs);
}

/* return from vm86 mode to 32 bit. The vm86() syscall will return
   'retval' */
static inline void return_to_32bit(CPUX86State *env, int retval)
{
#ifdef DEBUG_VM86
    fprintf(logfile, "return_to_32bit: ret=0x%x\n", retval);
#endif
    save_v86_state(env);
    env->regs[R_EAX] = retval;
}

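/* IF is never handed to the vm86 program directly: it is emulated with
   the virtual interrupt flag (VIF).  If the program enables interrupts
   while a virtual interrupt is pending (VIP), we return to 32-bit mode
   with TARGET_VM86_STI so the pending event can be delivered. */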
static inline int set_IF(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    ts->v86flags |= VIF_MASK;
    if (ts->v86flags & VIP_MASK) {
        return_to_32bit(env, TARGET_VM86_STI);
        return 1;
    }
    return 0;
}

static inline void clear_IF(CPUX86State *env)
{
    TaskState *ts = env->opaque;

    ts->v86flags &= ~VIF_MASK;
}

static inline void clear_TF(CPUX86State *env)
{
    env->eflags &= ~TF_MASK;
}

static inline void clear_AC(CPUX86State *env)
{
    env->eflags &= ~AC_MASK;
}

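/* Load the flags image supplied by the vm86 program: the full value is
   kept in v86flags, only the SAFE_MASK bits reach the real eflags, and
   IF is routed through the VIF emulation above.  A non-zero return value
   means control has already gone back to 32-bit mode. */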
static inline int set_vflags_long(unsigned long eflags, CPUX86State *env)
{
    TaskState *ts = env->opaque;

    set_flags(ts->v86flags, eflags, ts->v86mask);
    set_flags(env->eflags, eflags, SAFE_MASK);
    if (eflags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}

static inline int set_vflags_short(unsigned short flags, CPUX86State *env)
{
    TaskState *ts = env->opaque;

    set_flags(ts->v86flags, flags, ts->v86mask & 0xffff);
    set_flags(env->eflags, flags, SAFE_MASK);
    if (flags & IF_MASK)
        return set_IF(env);
    else
        clear_IF(env);
    return 0;
}

static inline unsigned int get_vflags(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    unsigned int flags;

    flags = env->eflags & RETURN_MASK;
    if (ts->v86flags & VIF_MASK)
        flags |= IF_MASK;
    return flags | (ts->v86flags & ts->v86mask);
}

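/* Add to the low 16 bits of a register with 16-bit wrap-around, leaving
   the upper half untouched (real-mode style SP/IP arithmetic). */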
#define ADD16(reg, val) reg = (reg & ~0xffff) | ((reg + (val)) & 0xffff)

/* handle VM86 interrupt (NOTE: the CPU core currently does not
   support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno)
{
    TaskState *ts = env->opaque;
    uint32_t *int_ptr, segoffs;
    uint8_t *ssp;
    unsigned int sp;

#if 1
    if (intno == 0xe6 && (env->regs[R_EAX] & 0xffff) == 0x00c0)
        loglevel = 1;
#endif

    if (env->segs[R_CS] == TARGET_BIOSSEG)
        goto cannot_handle;
    if (is_revectored(intno, &ts->target_v86->int_revectored))
        goto cannot_handle;
    if (intno == 0x21 && is_revectored((env->regs[R_EAX] >> 8) & 0xff,
                                       &ts->target_v86->int21_revectored))
        goto cannot_handle;
    int_ptr = (uint32_t *)(intno << 2);
    segoffs = tswap32(*int_ptr);
    if ((segoffs >> 16) == TARGET_BIOSSEG)
        goto cannot_handle;
#if defined(DEBUG_VM86)
    fprintf(logfile, "VM86: emulating int 0x%x. CS:IP=%04x:%04x\n",
            intno, segoffs >> 16, segoffs & 0xffff);
#endif
    /* save old state */
    ssp = (uint8_t *)(env->segs[R_SS] << 4);
    sp = env->regs[R_ESP] & 0xffff;
    vm_putw(ssp, sp - 2, get_vflags(env));
    vm_putw(ssp, sp - 4, env->segs[R_CS]);
    vm_putw(ssp, sp - 6, env->eip);
    ADD16(env->regs[R_ESP], -6);
    /* goto interrupt handler */
    env->eip = segoffs & 0xffff;
    cpu_x86_load_seg(env, R_CS, segoffs >> 16);
    clear_TF(env);
    clear_IF(env);
    clear_AC(env);
    return;
 cannot_handle:
#if defined(DEBUG_VM86)
    fprintf(logfile, "VM86: return to 32 bits int 0x%x\n", intno);
#endif
    return_to_32bit(env, TARGET_VM86_INTx | (intno << 8));
}

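/* Debug (1) and breakpoint (3) traps are reported back to the 32-bit
   caller; any other trap is delivered to the vm86 program as a real-mode
   interrupt. */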
void handle_vm86_trap(CPUX86State *env, int trapno)
{
    if (trapno == 1 || trapno == 3) {
        return_to_32bit(env, TARGET_VM86_TRAP + (trapno << 8));
    } else {
        do_int(env, trapno);
    }
}

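/* CHECK_IF_IN_TRAP re-arms TF in the flags image sitting on the guest
   stack when the vm86plus debug interface has a single-step pending.
   VM86_FAULT_RETURN exits to 32-bit mode with TARGET_VM86_PICRETURN when
   the caller asked to regain control as soon as interrupts are enabled
   (force_return_for_pic); otherwise it simply returns. */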
#define CHECK_IF_IN_TRAP(disp) \
    if ((tswap32(ts->target_v86->vm86plus.flags) & TARGET_vm86dbg_active) && \
        (tswap32(ts->target_v86->vm86plus.flags) & TARGET_vm86dbg_TFpendig)) \
        vm_putw(ssp, sp + disp, vm_getw(ssp, sp + disp) | TF_MASK)

#define VM86_FAULT_RETURN \
    if ((tswap32(ts->target_v86->vm86plus.flags) & TARGET_force_return_for_pic) && \
        (ts->v86flags & (IF_MASK | VIF_MASK))) \
        return_to_32bit(env, TARGET_VM86_PICRETURN); \
    return

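/* Emulate the privileged/sensitive instructions that raise a GP fault in
   vm86 mode: pushf/popf, int, iret, cli and sti, in both their 16-bit
   and 0x66-prefixed 32-bit operand forms.  Anything else is reported to
   the 32-bit caller as TARGET_VM86_UNKNOWN. */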
void handle_vm86_fault(CPUX86State *env)
{
    TaskState *ts = env->opaque;
    uint8_t *csp, *pc, *ssp;
    unsigned int ip, sp;

    csp = (uint8_t *)(env->segs[R_CS] << 4);
    ip = env->eip & 0xffff;
    pc = csp + ip;

    ssp = (uint8_t *)(env->segs[R_SS] << 4);
    sp = env->regs[R_ESP] & 0xffff;

#if defined(DEBUG_VM86)
    fprintf(logfile, "VM86 exception %04x:%08x %02x %02x\n",
            env->segs[R_CS], env->eip, pc[0], pc[1]);
#endif

    /* VM86 mode */
    switch(pc[0]) {
    case 0x66:
        switch(pc[1]) {
        case 0x9c: /* pushfd */
            ADD16(env->eip, 2);
            ADD16(env->regs[R_ESP], -4);
            vm_putl(ssp, sp - 4, get_vflags(env));
            VM86_FAULT_RETURN;

        case 0x9d: /* popfd */
            ADD16(env->eip, 2);
            ADD16(env->regs[R_ESP], 4);
            CHECK_IF_IN_TRAP(0);
            if (set_vflags_long(vm_getl(ssp, sp), env))
                return;
            VM86_FAULT_RETURN;

        case 0xcf: /* iretd */
            ADD16(env->regs[R_ESP], 12);
            env->eip = vm_getl(ssp, sp) & 0xffff;
            cpu_x86_load_seg(env, R_CS, vm_getl(ssp, sp + 4) & 0xffff);
            CHECK_IF_IN_TRAP(8);
            if (set_vflags_long(vm_getl(ssp, sp + 8), env))
                return;
            VM86_FAULT_RETURN;

        default:
            goto vm86_gpf;
        }
        break;
    case 0x9c: /* pushf */
        ADD16(env->eip, 1);
        ADD16(env->regs[R_ESP], -2);
        vm_putw(ssp, sp - 2, get_vflags(env));
        VM86_FAULT_RETURN;

    case 0x9d: /* popf */
        ADD16(env->eip, 1);
        ADD16(env->regs[R_ESP], 2);
        CHECK_IF_IN_TRAP(0);
        if (set_vflags_short(vm_getw(ssp, sp), env))
            return;
        VM86_FAULT_RETURN;

    case 0xcd: /* int */
        ADD16(env->eip, 2);
        do_int(env, pc[1]);
        break;

    case 0xcf: /* iret */
        ADD16(env->regs[R_ESP], 6);
        env->eip = vm_getw(ssp, sp);
        cpu_x86_load_seg(env, R_CS, vm_getw(ssp, sp + 2));
        CHECK_IF_IN_TRAP(4);
        if (set_vflags_short(vm_getw(ssp, sp + 4), env))
            return;
        VM86_FAULT_RETURN;

    case 0xfa: /* cli */
        ADD16(env->eip, 1);
        clear_IF(env);
        VM86_FAULT_RETURN;

    case 0xfb: /* sti */
        ADD16(env->eip, 1);
        if (set_IF(env))
            return;
        VM86_FAULT_RETURN;

    default:
    vm86_gpf:
        /* real VM86 GPF exception */
        return_to_32bit(env, TARGET_VM86_UNKNOWN);
        break;
    }
}

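/* Entry point for the vm86 syscall: save the current 32-bit register
   state, build the vm86 CPU state from the user-supplied register image,
   and let the virtual CPU resume in VM86 mode.  Control comes back
   through return_to_32bit(), which restores the saved state and puts the
   event code in eax.

   Rough usage sketch (hypothetical guest-side caller, not part of this
   file): the caller fills a struct vm86plus_struct with real-mode
   register values and invokes the vm86 syscall; execution then continues
   at regs.cs:regs.eip in real-address mode until one of the events above
   transfers control back. */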
int do_vm86(CPUX86State *env, long subfunction,
            struct target_vm86plus_struct * target_v86)
{
    TaskState *ts = env->opaque;
    int ret;

    switch (subfunction) {
    case TARGET_VM86_REQUEST_IRQ:
    case TARGET_VM86_FREE_IRQ:
    case TARGET_VM86_GET_IRQ_BITS:
    case TARGET_VM86_GET_AND_RESET_IRQ:
        gemu_log("qemu: unsupported vm86 subfunction (%ld)\n", subfunction);
        ret = -EINVAL;
        goto out;
    case TARGET_VM86_PLUS_INSTALL_CHECK:
        /* NOTE: on old vm86 stuff this will return the error
           from verify_area(), because the subfunction is
           interpreted as (invalid) address to vm86_struct.
           So the installation check works.
         */
        ret = 0;
        goto out;
    }

    ts->target_v86 = target_v86;
    /* save current CPU regs */
    ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
    ts->vm86_saved_regs.ebx = env->regs[R_EBX];
    ts->vm86_saved_regs.ecx = env->regs[R_ECX];
    ts->vm86_saved_regs.edx = env->regs[R_EDX];
    ts->vm86_saved_regs.esi = env->regs[R_ESI];
    ts->vm86_saved_regs.edi = env->regs[R_EDI];
    ts->vm86_saved_regs.ebp = env->regs[R_EBP];
    ts->vm86_saved_regs.esp = env->regs[R_ESP];
    ts->vm86_saved_regs.eflags = env->eflags;
    ts->vm86_saved_regs.eip = env->eip;
    ts->vm86_saved_regs.cs = env->segs[R_CS];
    ts->vm86_saved_regs.ss = env->segs[R_SS];
    ts->vm86_saved_regs.ds = env->segs[R_DS];
    ts->vm86_saved_regs.es = env->segs[R_ES];
    ts->vm86_saved_regs.fs = env->segs[R_FS];
    ts->vm86_saved_regs.gs = env->segs[R_GS];

    /* build vm86 CPU state */
    ts->v86flags = tswap32(target_v86->regs.eflags);
    env->eflags = (env->eflags & ~SAFE_MASK) |
        (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;
    ts->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;

    env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
    env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
    env->regs[R_EDX] = tswap32(target_v86->regs.edx);
    env->regs[R_ESI] = tswap32(target_v86->regs.esi);
    env->regs[R_EDI] = tswap32(target_v86->regs.edi);
    env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
    env->regs[R_ESP] = tswap32(target_v86->regs.esp);
    env->eip = tswap32(target_v86->regs.eip);
    cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
    cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
    cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
    cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
    cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
    cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
    ret = tswap32(target_v86->regs.eax); /* eax will be restored at
                                            the end of the syscall */
#ifdef DEBUG_VM86
    fprintf(logfile, "do_vm86: cs:ip=%04x:%04x\n", env->segs[R_CS], env->eip);
#endif
    /* now the virtual CPU is ready for vm86 execution ! */
 out:
    return ret;
}