1 | /* | |
2 | * vm86 linux syscall support | |
3 | * | |
4 | * Copyright (c) 2003 Fabrice Bellard | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License as published by | |
8 | * the Free Software Foundation; either version 2 of the License, or | |
9 | * (at your option) any later version. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program; if not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
19 | #include "qemu/osdep.h" | |
20 | ||
21 | #include "qemu.h" | |
22 | ||
/* Define DEBUG_VM86 to trace vm86 emulation through qemu_log(). */
//#define DEBUG_VM86

#ifdef DEBUG_VM86
/* Wrapped in do/while so the macro is a single statement: the previous
 * definition ended with a semicolon, expanding to TWO statements and
 * silently breaking any "if (cond) LOG_VM86(...); else ..." use. */
# define LOG_VM86(...) do { qemu_log(__VA_ARGS__); } while (0)
#else
# define LOG_VM86(...) do { } while (0)
#endif
30 | ||
31 | ||
/* Replace exactly the bits of X selected by 'mask' with the matching
 * bits of 'new', leaving all other bits of X untouched.
 * NOTE: X is evaluated twice — do not pass expressions with side
 * effects. */
#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

/* EFLAGS bits the vm86 task is allowed to change directly in the real
 * eflags (arithmetic/control flags; presumably mirrors the Linux
 * kernel's vm86 SAFE_MASK — confirm against the kernel source). */
#define SAFE_MASK (0xDD5)
/* EFLAGS bits propagated back when building the flags image returned
 * to the vm86 task (see get_vflags()). */
#define RETURN_MASK (0xDFF)
37 | ||
/*
 * Test bit 'nr' of the revectoring bitmap.  Returns 1 when interrupt
 * 'nr' must be reflected to the 32-bit monitor rather than dispatched
 * through the vm86 interrupt vector table.
 */
static inline int is_revectored(int nr, struct target_revectored_struct *bitmap)
{
    const uint8_t *bytes = (const uint8_t *)bitmap;

    return (bytes[nr >> 3] >> (nr & 7)) & 1;
}
42 | ||
43 | static inline void vm_putw(CPUX86State *env, uint32_t segptr, | |
44 | unsigned int reg16, unsigned int val) | |
45 | { | |
46 | cpu_stw_data(env, segptr + (reg16 & 0xffff), val); | |
47 | } | |
48 | ||
49 | static inline void vm_putl(CPUX86State *env, uint32_t segptr, | |
50 | unsigned int reg16, unsigned int val) | |
51 | { | |
52 | cpu_stl_data(env, segptr + (reg16 & 0xffff), val); | |
53 | } | |
54 | ||
55 | static inline unsigned int vm_getb(CPUX86State *env, | |
56 | uint32_t segptr, unsigned int reg16) | |
57 | { | |
58 | return cpu_ldub_data(env, segptr + (reg16 & 0xffff)); | |
59 | } | |
60 | ||
61 | static inline unsigned int vm_getw(CPUX86State *env, | |
62 | uint32_t segptr, unsigned int reg16) | |
63 | { | |
64 | return cpu_lduw_data(env, segptr + (reg16 & 0xffff)); | |
65 | } | |
66 | ||
67 | static inline unsigned int vm_getl(CPUX86State *env, | |
68 | uint32_t segptr, unsigned int reg16) | |
69 | { | |
70 | return cpu_ldl_data(env, segptr + (reg16 & 0xffff)); | |
71 | } | |
72 | ||
/*
 * Leave vm86 mode: copy the current (vm86) CPU registers back into the
 * userspace target_vm86plus_struct that was locked at do_vm86() time,
 * then restore the 32-bit register state that was saved on entry.
 * Called on every transition back to the 32-bit monitor.
 */
void save_v86_state(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    TaskState *ts = cs->opaque;
    struct target_vm86plus_struct * target_v86;

    if (!lock_user_struct(VERIFY_WRITE, target_v86, ts->target_v86, 0))
        /* FIXME - should return an error */
        return;
    /* put the VM86 registers in the userspace register structure */
    target_v86->regs.eax = tswap32(env->regs[R_EAX]);
    target_v86->regs.ebx = tswap32(env->regs[R_EBX]);
    target_v86->regs.ecx = tswap32(env->regs[R_ECX]);
    target_v86->regs.edx = tswap32(env->regs[R_EDX]);
    target_v86->regs.esi = tswap32(env->regs[R_ESI]);
    target_v86->regs.edi = tswap32(env->regs[R_EDI]);
    target_v86->regs.ebp = tswap32(env->regs[R_EBP]);
    target_v86->regs.esp = tswap32(env->regs[R_ESP]);
    target_v86->regs.eip = tswap32(env->eip);
    target_v86->regs.cs = tswap16(env->segs[R_CS].selector);
    target_v86->regs.ss = tswap16(env->segs[R_SS].selector);
    target_v86->regs.ds = tswap16(env->segs[R_DS].selector);
    target_v86->regs.es = tswap16(env->segs[R_ES].selector);
    target_v86->regs.fs = tswap16(env->segs[R_FS].selector);
    target_v86->regs.gs = tswap16(env->segs[R_GS].selector);
    /* merge the shadow VIF and cpu-type bits into the reported eflags */
    set_flags(env->eflags, ts->v86flags, VIF_MASK | ts->v86mask);
    target_v86->regs.eflags = tswap32(env->eflags);
    unlock_user_struct(target_v86, ts->target_v86, 1);
    LOG_VM86("save_v86_state: eflags=%08x cs:ip=%04x:%04x\n",
             env->eflags, env->segs[R_CS].selector, env->eip);

    /* restore 32 bit registers */
    env->regs[R_EAX] = ts->vm86_saved_regs.eax;
    env->regs[R_EBX] = ts->vm86_saved_regs.ebx;
    env->regs[R_ECX] = ts->vm86_saved_regs.ecx;
    env->regs[R_EDX] = ts->vm86_saved_regs.edx;
    env->regs[R_ESI] = ts->vm86_saved_regs.esi;
    env->regs[R_EDI] = ts->vm86_saved_regs.edi;
    env->regs[R_EBP] = ts->vm86_saved_regs.ebp;
    env->regs[R_ESP] = ts->vm86_saved_regs.esp;
    env->eflags = ts->vm86_saved_regs.eflags;
    env->eip = ts->vm86_saved_regs.eip;

    cpu_x86_load_seg(env, R_CS, ts->vm86_saved_regs.cs);
    cpu_x86_load_seg(env, R_SS, ts->vm86_saved_regs.ss);
    cpu_x86_load_seg(env, R_DS, ts->vm86_saved_regs.ds);
    cpu_x86_load_seg(env, R_ES, ts->vm86_saved_regs.es);
    cpu_x86_load_seg(env, R_FS, ts->vm86_saved_regs.fs);
    cpu_x86_load_seg(env, R_GS, ts->vm86_saved_regs.gs);
}
123 | ||
/* return from vm86 mode to 32 bit. The vm86() syscall will return
   'retval' */
static inline void return_to_32bit(CPUX86State *env, int retval)
{
    LOG_VM86("return_to_32bit: ret=0x%x\n", retval);
    /* save_v86_state() restores the saved 32-bit registers, so EAX is
     * overwritten last with the syscall return value. */
    save_v86_state(env);
    env->regs[R_EAX] = retval;
}
132 | ||
133 | static inline int set_IF(CPUX86State *env) | |
134 | { | |
135 | CPUState *cs = env_cpu(env); | |
136 | TaskState *ts = cs->opaque; | |
137 | ||
138 | ts->v86flags |= VIF_MASK; | |
139 | if (ts->v86flags & VIP_MASK) { | |
140 | return_to_32bit(env, TARGET_VM86_STI); | |
141 | return 1; | |
142 | } | |
143 | return 0; | |
144 | } | |
145 | ||
146 | static inline void clear_IF(CPUX86State *env) | |
147 | { | |
148 | CPUState *cs = env_cpu(env); | |
149 | TaskState *ts = cs->opaque; | |
150 | ||
151 | ts->v86flags &= ~VIF_MASK; | |
152 | } | |
153 | ||
154 | static inline void clear_TF(CPUX86State *env) | |
155 | { | |
156 | env->eflags &= ~TF_MASK; | |
157 | } | |
158 | ||
159 | static inline void clear_AC(CPUX86State *env) | |
160 | { | |
161 | env->eflags &= ~AC_MASK; | |
162 | } | |
163 | ||
164 | static inline int set_vflags_long(unsigned long eflags, CPUX86State *env) | |
165 | { | |
166 | CPUState *cs = env_cpu(env); | |
167 | TaskState *ts = cs->opaque; | |
168 | ||
169 | set_flags(ts->v86flags, eflags, ts->v86mask); | |
170 | set_flags(env->eflags, eflags, SAFE_MASK); | |
171 | if (eflags & IF_MASK) | |
172 | return set_IF(env); | |
173 | else | |
174 | clear_IF(env); | |
175 | return 0; | |
176 | } | |
177 | ||
178 | static inline int set_vflags_short(unsigned short flags, CPUX86State *env) | |
179 | { | |
180 | CPUState *cs = env_cpu(env); | |
181 | TaskState *ts = cs->opaque; | |
182 | ||
183 | set_flags(ts->v86flags, flags, ts->v86mask & 0xffff); | |
184 | set_flags(env->eflags, flags, SAFE_MASK); | |
185 | if (flags & IF_MASK) | |
186 | return set_IF(env); | |
187 | else | |
188 | clear_IF(env); | |
189 | return 0; | |
190 | } | |
191 | ||
192 | static inline unsigned int get_vflags(CPUX86State *env) | |
193 | { | |
194 | CPUState *cs = env_cpu(env); | |
195 | TaskState *ts = cs->opaque; | |
196 | unsigned int flags; | |
197 | ||
198 | flags = env->eflags & RETURN_MASK; | |
199 | if (ts->v86flags & VIF_MASK) | |
200 | flags |= IF_MASK; | |
201 | flags |= IOPL_MASK; | |
202 | return flags | (ts->v86flags & ts->v86mask); | |
203 | } | |
204 | ||
/* Add 'val' to the low 16 bits of 'reg', leaving the high bits
 * untouched (real-mode 16-bit wrap-around arithmetic). */
#define ADD16(reg, val) reg = (reg & ~0xffff) | ((reg + (val)) & 0xffff)

/* handle VM86 interrupt (NOTE: the CPU core currently does not
   support TSS interrupt revectoring, so this code is always executed) */
static void do_int(CPUX86State *env, int intno)
{
    CPUState *cs = env_cpu(env);
    TaskState *ts = cs->opaque;
    uint32_t int_addr, segoffs, ssp;
    unsigned int sp;

    /* Interrupts raised from the BIOS segment, revectored interrupts,
     * and revectored int 0x21/AH sub-functions must be handled by the
     * 32-bit monitor, not by the guest's real-mode vector table. */
    if (env->segs[R_CS].selector == TARGET_BIOSSEG)
        goto cannot_handle;
    if (is_revectored(intno, &ts->vm86plus.int_revectored))
        goto cannot_handle;
    if (intno == 0x21 && is_revectored((env->regs[R_EAX] >> 8) & 0xff,
                                       &ts->vm86plus.int21_revectored))
        goto cannot_handle;
    /* fetch the CS:IP handler address from the real-mode IVT at 0 */
    int_addr = (intno << 2);
    segoffs = cpu_ldl_data(env, int_addr);
    if ((segoffs >> 16) == TARGET_BIOSSEG)
        goto cannot_handle;
    LOG_VM86("VM86: emulating int 0x%x. CS:IP=%04x:%04x\n",
             intno, segoffs >> 16, segoffs & 0xffff);
    /* save old state */
    ssp = env->segs[R_SS].selector << 4;
    sp = env->regs[R_ESP] & 0xffff;
    /* push FLAGS, CS, IP (16-bit real-mode interrupt frame) */
    vm_putw(env, ssp, sp - 2, get_vflags(env));
    vm_putw(env, ssp, sp - 4, env->segs[R_CS].selector);
    vm_putw(env, ssp, sp - 6, env->eip);
    ADD16(env->regs[R_ESP], -6);
    /* goto interrupt handler */
    env->eip = segoffs & 0xffff;
    cpu_x86_load_seg(env, R_CS, segoffs >> 16);
    /* interrupt entry clears TF, IF (virtual) and AC */
    clear_TF(env);
    clear_IF(env);
    clear_AC(env);
    return;
 cannot_handle:
    LOG_VM86("VM86: return to 32 bits int 0x%x\n", intno);
    return_to_32bit(env, TARGET_VM86_INTx | (intno << 8));
}
247 | ||
248 | void handle_vm86_trap(CPUX86State *env, int trapno) | |
249 | { | |
250 | if (trapno == 1 || trapno == 3) { | |
251 | return_to_32bit(env, TARGET_VM86_TRAP + (trapno << 8)); | |
252 | } else { | |
253 | do_int(env, trapno); | |
254 | } | |
255 | } | |
256 | ||
/* While the vm86plus debugger is active with a pending trap flag,
 * force TF into the flags image about to be reloaded so single-step
 * resumes.  Expects 'ts' and 'newflags' in the caller's scope. */
#define CHECK_IF_IN_TRAP() \
    if ((ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) && \
        (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_TFpendig)) \
        newflags |= TF_MASK

/* If the caller asked for PIC return notification and (virtual)
 * interrupts are enabled, bounce to 32-bit mode with
 * TARGET_VM86_PICRETURN instead of resuming the vm86 task.
 * Expects 'ts' and 'env' in the caller's scope. */
#define VM86_FAULT_RETURN \
    if ((ts->vm86plus.vm86plus.flags & TARGET_force_return_for_pic) && \
        (ts->v86flags & (IF_MASK | VIF_MASK))) \
        return_to_32bit(env, TARGET_VM86_PICRETURN); \
    return

/*
 * Emulate the privileged instruction that caused a GPF while in vm86
 * mode: decode prefixes, then handle pushf/popf/int/iret/cli/sti by
 * hand so IF-related state stays virtualised.  Anything else is
 * reported to the 32-bit monitor as TARGET_VM86_UNKNOWN.
 */
void handle_vm86_fault(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);
    TaskState *ts = cs->opaque;
    uint32_t csp, ssp;
    unsigned int ip, sp, newflags, newip, newcs, opcode, intno;
    int data32, pref_done;

    /* real-mode linear addresses: segment << 4 plus 16-bit offset */
    csp = env->segs[R_CS].selector << 4;
    ip = env->eip & 0xffff;

    ssp = env->segs[R_SS].selector << 4;
    sp = env->regs[R_ESP] & 0xffff;

    LOG_VM86("VM86 exception %04x:%08x\n",
             env->segs[R_CS].selector, env->eip);

    /* consume instruction prefixes; only the operand-size prefix
     * (0x66) changes how the instruction is emulated below */
    data32 = 0;
    pref_done = 0;
    do {
        opcode = vm_getb(env, csp, ip);
        ADD16(ip, 1);
        switch (opcode) {
        case 0x66: /* 32-bit data */ data32=1; break;
        case 0x67: /* 32-bit address */ break;
        case 0x2e: /* CS */ break;
        case 0x3e: /* DS */ break;
        case 0x26: /* ES */ break;
        case 0x36: /* SS */ break;
        case 0x65: /* GS */ break;
        case 0x64: /* FS */ break;
        case 0xf2: /* repnz */ break;
        case 0xf3: /* rep */ break;
        default: pref_done = 1;
        }
    } while (!pref_done);

    /* VM86 mode */
    switch(opcode) {
    case 0x9c: /* pushf */
        /* push the virtualised flags image, not the real eflags */
        if (data32) {
            vm_putl(env, ssp, sp - 4, get_vflags(env));
            ADD16(env->regs[R_ESP], -4);
        } else {
            vm_putw(env, ssp, sp - 2, get_vflags(env));
            ADD16(env->regs[R_ESP], -2);
        }
        env->eip = ip;
        VM86_FAULT_RETURN;

    case 0x9d: /* popf */
        if (data32) {
            newflags = vm_getl(env, ssp, sp);
            ADD16(env->regs[R_ESP], 4);
        } else {
            newflags = vm_getw(env, ssp, sp);
            ADD16(env->regs[R_ESP], 2);
        }
        env->eip = ip;
        CHECK_IF_IN_TRAP();
        /* set_vflags_* return nonzero when they already returned to
         * 32-bit mode, in which case VM86_FAULT_RETURN must not run */
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xcd: /* int */
        intno = vm_getb(env, csp, ip);
        ADD16(ip, 1);
        env->eip = ip;
        /* a debugger-intercepted int returns to the monitor instead */
        if (ts->vm86plus.vm86plus.flags & TARGET_vm86dbg_active) {
            if ( (ts->vm86plus.vm86plus.vm86dbg_intxxtab[intno >> 3] >>
                  (intno &7)) & 1) {
                return_to_32bit(env, TARGET_VM86_INTx + (intno << 8));
                return;
            }
        }
        do_int(env, intno);
        break;

    case 0xcf: /* iret */
        /* pop IP, CS and FLAGS from the 16-bit (or 32-bit) frame */
        if (data32) {
            newip = vm_getl(env, ssp, sp) & 0xffff;
            newcs = vm_getl(env, ssp, sp + 4) & 0xffff;
            newflags = vm_getl(env, ssp, sp + 8);
            ADD16(env->regs[R_ESP], 12);
        } else {
            newip = vm_getw(env, ssp, sp);
            newcs = vm_getw(env, ssp, sp + 2);
            newflags = vm_getw(env, ssp, sp + 4);
            ADD16(env->regs[R_ESP], 6);
        }
        env->eip = newip;
        cpu_x86_load_seg(env, R_CS, newcs);
        CHECK_IF_IN_TRAP();
        if (data32) {
            if (set_vflags_long(newflags, env))
                return;
        } else {
            if (set_vflags_short(newflags, env))
                return;
        }
        VM86_FAULT_RETURN;

    case 0xfa: /* cli */
        env->eip = ip;
        clear_IF(env);
        VM86_FAULT_RETURN;

    case 0xfb: /* sti */
        env->eip = ip;
        /* set_IF() returns nonzero if a pending interrupt forced a
         * return to 32-bit mode */
        if (set_IF(env))
            return;
        VM86_FAULT_RETURN;

    default:
        /* real VM86 GPF exception */
        return_to_32bit(env, TARGET_VM86_UNKNOWN);
        break;
    }
}
392 | ||
/*
 * Implement the vm86(old)/vm86plus syscall: save the caller's 32-bit
 * register state into the TaskState, load the vm86 register image from
 * the userspace target_vm86plus_struct at 'vm86_addr', and set up
 * eflags (VM_MASK) so execution resumes in virtual-8086 mode.
 * Returns a negative target errno on failure; on success the returned
 * value is the guest's eax image (restored at syscall exit).
 */
int do_vm86(CPUX86State *env, long subfunction, abi_ulong vm86_addr)
{
    CPUState *cs = env_cpu(env);
    TaskState *ts = cs->opaque;
    struct target_vm86plus_struct * target_v86;
    int ret;

    switch (subfunction) {
    case TARGET_VM86_REQUEST_IRQ:
    case TARGET_VM86_FREE_IRQ:
    case TARGET_VM86_GET_IRQ_BITS:
    case TARGET_VM86_GET_AND_RESET_IRQ:
        gemu_log("qemu: unsupported vm86 subfunction (%ld)\n", subfunction);
        ret = -TARGET_EINVAL;
        goto out;
    case TARGET_VM86_PLUS_INSTALL_CHECK:
        /* NOTE: on old vm86 stuff this will return the error
           from verify_area(), because the subfunction is
           interpreted as (invalid) address to vm86_struct.
           So the installation check works.
        */
        ret = 0;
        goto out;
    }

    /* save current CPU regs */
    ts->vm86_saved_regs.eax = 0; /* default vm86 syscall return code */
    ts->vm86_saved_regs.ebx = env->regs[R_EBX];
    ts->vm86_saved_regs.ecx = env->regs[R_ECX];
    ts->vm86_saved_regs.edx = env->regs[R_EDX];
    ts->vm86_saved_regs.esi = env->regs[R_ESI];
    ts->vm86_saved_regs.edi = env->regs[R_EDI];
    ts->vm86_saved_regs.ebp = env->regs[R_EBP];
    ts->vm86_saved_regs.esp = env->regs[R_ESP];
    ts->vm86_saved_regs.eflags = env->eflags;
    ts->vm86_saved_regs.eip = env->eip;
    ts->vm86_saved_regs.cs = env->segs[R_CS].selector;
    ts->vm86_saved_regs.ss = env->segs[R_SS].selector;
    ts->vm86_saved_regs.ds = env->segs[R_DS].selector;
    ts->vm86_saved_regs.es = env->segs[R_ES].selector;
    ts->vm86_saved_regs.fs = env->segs[R_FS].selector;
    ts->vm86_saved_regs.gs = env->segs[R_GS].selector;

    /* remember the struct address so save_v86_state() can write back */
    ts->target_v86 = vm86_addr;
    if (!lock_user_struct(VERIFY_READ, target_v86, vm86_addr, 1))
        return -TARGET_EFAULT;
    /* build vm86 CPU state */
    ts->v86flags = tswap32(target_v86->regs.eflags);
    /* only SAFE_MASK bits from the guest image reach the real eflags;
     * VM_MASK switches the CPU into virtual-8086 mode */
    env->eflags = (env->eflags & ~SAFE_MASK) |
        (tswap32(target_v86->regs.eflags) & SAFE_MASK) | VM_MASK;

    /* the emulated cpu type decides which extra eflags bits the vm86
     * task may see/modify (NT, IOPL, AC, ID) */
    ts->vm86plus.cpu_type = tswapal(target_v86->cpu_type);
    switch (ts->vm86plus.cpu_type) {
    case TARGET_CPU_286:
        ts->v86mask = 0;
        break;
    case TARGET_CPU_386:
        ts->v86mask = NT_MASK | IOPL_MASK;
        break;
    case TARGET_CPU_486:
        ts->v86mask = AC_MASK | NT_MASK | IOPL_MASK;
        break;
    default:
        ts->v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
        break;
    }

    /* load the vm86 register image (byte-swapped from target order) */
    env->regs[R_EBX] = tswap32(target_v86->regs.ebx);
    env->regs[R_ECX] = tswap32(target_v86->regs.ecx);
    env->regs[R_EDX] = tswap32(target_v86->regs.edx);
    env->regs[R_ESI] = tswap32(target_v86->regs.esi);
    env->regs[R_EDI] = tswap32(target_v86->regs.edi);
    env->regs[R_EBP] = tswap32(target_v86->regs.ebp);
    env->regs[R_ESP] = tswap32(target_v86->regs.esp);
    env->eip = tswap32(target_v86->regs.eip);
    cpu_x86_load_seg(env, R_CS, tswap16(target_v86->regs.cs));
    cpu_x86_load_seg(env, R_SS, tswap16(target_v86->regs.ss));
    cpu_x86_load_seg(env, R_DS, tswap16(target_v86->regs.ds));
    cpu_x86_load_seg(env, R_ES, tswap16(target_v86->regs.es));
    cpu_x86_load_seg(env, R_FS, tswap16(target_v86->regs.fs));
    cpu_x86_load_seg(env, R_GS, tswap16(target_v86->regs.gs));
    ret = tswap32(target_v86->regs.eax); /* eax will be restored at
                                            the end of the syscall */
    memcpy(&ts->vm86plus.int_revectored,
           &target_v86->int_revectored, 32);
    memcpy(&ts->vm86plus.int21_revectored,
           &target_v86->int21_revectored, 32);
    ts->vm86plus.vm86plus.flags = tswapal(target_v86->vm86plus.flags);
    memcpy(&ts->vm86plus.vm86plus.vm86dbg_intxxtab,
           target_v86->vm86plus.vm86dbg_intxxtab, 32);
    unlock_user_struct(target_v86, vm86_addr, 0);

    LOG_VM86("do_vm86: cs:ip=%04x:%04x\n",
             env->segs[R_CS].selector, env->eip);
    /* now the virtual CPU is ready for vm86 execution ! */
 out:
    return ret;
}