/*
 *  linux/kernel/vm86.c
 *
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */

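/*
 * Note: the register image is the first member of kernel_vm86_struct,
 * which sits on the kernel stack while the task runs in vm86 mode
 * (esp0 is pointed into it in do_sys_vm86() below), so casting the
 * regs pointer back recovers the whole surrounding structure.
 */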
#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->eip))
#define SP(regs)	(*(unsigned short *)&((regs)->esp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

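/*
 * SAFE_MASK (0xDD5) covers the bits vm86 code may change directly in
 * the real EFLAGS image: CF, PF, AF, ZF, SF, TF, DF and OF.  IF, IOPL,
 * NT, AC and ID stay under kernel control.  RETURN_MASK (0xDFF) is the
 * set reported back by get_vflags(), which again excludes IF: that bit
 * is synthesized from the virtual interrupt flag.
 */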
#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

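/*
 * The register image is copied to and from user space in two pieces,
 * split at orig_eax: VM86_REGS_SIZE1 is the offset of that member
 * (an open-coded offsetof()), VM86_REGS_SIZE2 the remainder of the
 * structure.
 */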
#define VM86_REGS_PART2 orig_eax
#define VM86_REGS_SIZE1 \
        ( (unsigned)( & (((struct kernel_vm86_regs *)0)->VM86_REGS_PART2) ) )
#define VM86_REGS_SIZE2 (sizeof(struct kernel_vm86_regs) - VM86_REGS_SIZE1)

struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
        struct tss_struct *tss;
        struct pt_regs *ret;
        unsigned long tmp;

        /*
         * This gets called from entry.S with interrupts disabled, but
         * from process context. Enable interrupts here, before trying
         * to access user space.
         */
        local_irq_enable();

        if (!current->thread.vm86_info) {
                printk("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
        tmp = copy_to_user(&current->thread.vm86_info->regs, regs, VM86_REGS_SIZE1);
        tmp += copy_to_user(&current->thread.vm86_info->regs.VM86_REGS_PART2,
                &regs->VM86_REGS_PART2, VM86_REGS_SIZE2);
        tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
        if (tmp) {
                printk("vm86: could not access userspace vm86_info\n");
                do_exit(SIGSEGV);
        }

        tss = &per_cpu(init_tss, get_cpu());
        current->thread.esp0 = current->thread.saved_esp0;
        current->thread.sysenter_cs = __KERNEL_CS;
        load_esp0(tss, &current->thread);
        current->thread.saved_esp0 = 0;
        put_cpu();

        loadsegment(fs, current->thread.saved_fs);
        loadsegment(gs, current->thread.saved_gs);
        ret = KVM86->regs32;
        return ret;
}

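/*
 * Write-protect the 32 pages mapping the VGA window at 0xA0000-0xBFFFF,
 * so that stores by the vm86 task fault first and screen updates can be
 * tracked through screen_bitmap (VM86_SCREEN_BITMAP mode).
 */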
static void mark_screen_rdonly(struct mm_struct *mm)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        int i;

        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
        pud = pud_offset(pgd, 0xA0000);
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
        for (i = 0; i < 32; i++) {
                if (pte_present(*pte))
                        set_pte(pte, pte_wrprotect(*pte));
                pte++;
        }
        pte_unmap_unlock(pte, ptl);
out:
        flush_tlb();
}


static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

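/*
 * The original vm86() entry point: %ebx holds a userspace pointer to a
 * plain struct vm86_struct.
 */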
asmlinkage int sys_vm86old(struct pt_regs regs)
{
        struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
        int tmp, ret = -EPERM;

        tsk = current;
        if (tsk->thread.saved_esp0)
                goto out;
        tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
        tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
                (long)&info.vm86plus - (long)&info.regs.VM86_REGS_PART2);
        ret = -EFAULT;
        if (tmp)
                goto out;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
        info.regs32 = &regs;
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
        ret = 0;        /* we never return here */
out:
        return ret;
}

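/*
 * The vm86plus entry point: %ebx selects a subfunction; for
 * VM86_ENTER/VM86_ENTER_NO_BYPASS, %ecx points to a struct
 * vm86plus_struct in user space.
 */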
asmlinkage int sys_vm86(struct pt_regs regs)
{
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
        int tmp, ret;
        struct vm86plus_struct __user *v86;

        tsk = current;
        switch (regs.ebx) {
                case VM86_REQUEST_IRQ:
                case VM86_FREE_IRQ:
                case VM86_GET_IRQ_BITS:
                case VM86_GET_AND_RESET_IRQ:
                        ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
                        goto out;
                case VM86_PLUS_INSTALL_CHECK:
                        /* NOTE: on old vm86 stuff this will return the error
                           from access_ok(), because the subfunction is
                           interpreted as an (invalid) address to vm86_struct.
                           So the installation check works.
                         */
                        ret = 0;
                        goto out;
        }

        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
        ret = -EPERM;
        if (tsk->thread.saved_esp0)
                goto out;
        v86 = (struct vm86plus_struct __user *)regs.ecx;
        tmp = copy_from_user(&info, v86, VM86_REGS_SIZE1);
        tmp += copy_from_user(&info.regs.VM86_REGS_PART2, &v86->regs.VM86_REGS_PART2,
                (long)&info.regs32 - (long)&info.regs.VM86_REGS_PART2);
        ret = -EFAULT;
        if (tmp)
                goto out;
        info.regs32 = &regs;
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
        ret = 0;        /* we never return here */
out:
        return ret;
}

static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
        struct tss_struct *tss;
        long eax;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
        info->regs.__null_ds = 0;
        info->regs.__null_es = 0;

/* we are clearing fs,gs later just before "jmp resume_userspace",
 * because starting with Linux 2.1.x they are no longer saved/restored
 */

/*
 * The eflags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
        VEFLAGS = info->regs.eflags;
        info->regs.eflags &= SAFE_MASK;
        info->regs.eflags |= info->regs32->eflags & ~SAFE_MASK;
        info->regs.eflags |= VM_MASK;

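        /*
         * v86mask selects the EFLAGS bits the emulated CPU type is
         * allowed to see: the 386 added NT and IOPL, the 486 added AC,
         * and later CPUs added ID.
         */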
        switch (info->cpu_type) {
                case CPU_286:
                        tsk->thread.v86mask = 0;
                        break;
                case CPU_386:
                        tsk->thread.v86mask = NT_MASK | IOPL_MASK;
                        break;
                case CPU_486:
                        tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
                        break;
                default:
                        tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
                        break;
        }

/*
 * Save old state, set default return value (%eax) to 0
 */
        info->regs32->eax = 0;
        tsk->thread.saved_esp0 = tsk->thread.esp0;
        savesegment(fs, tsk->thread.saved_fs);
        savesegment(gs, tsk->thread.saved_gs);

        tss = &per_cpu(init_tss, get_cpu());
        tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
        if (cpu_has_sep)
                tsk->thread.sysenter_cs = 0;
        load_esp0(tss, &tsk->thread);
        put_cpu();

        tsk->thread.screen_bitmap = info->screen_bitmap;
        if (info->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);
        __asm__ __volatile__("xorl %eax,%eax; movl %eax,%fs; movl %eax,%gs\n\t");
        __asm__ __volatile__("movl %%eax, %0\n" :"=r"(eax));

        /* call audit_syscall_exit, since we do not exit via the normal paths */
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(eax), eax);

        __asm__ __volatile__(
                "movl %0,%%esp\n\t"
                "movl %1,%%ebp\n\t"
                "jmp resume_userspace"
                : /* no outputs */
                :"r" (&info->regs), "r" (task_thread_info(tsk)));
        /* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
        struct pt_regs * regs32;

        regs32 = save_v86_state(regs16);
        regs32->eax = retval;
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "movl %1,%%ebp\n\t"
                "jmp resume_userspace"
                : : "r" (regs32), "r" (current_thread_info()));
}

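/*
 * IF itself is never handed to vm86 code; it is emulated through the
 * virtual VIF/VIP pair in v86flags.  Setting the virtual IF while an
 * interrupt is pending (VIP) bounces straight back to 32-bit mode
 * with VM86_STI.
 */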
static inline void set_IF(struct kernel_vm86_regs * regs)
{
        VEFLAGS |= VIF_MASK;
        if (VEFLAGS & VIP_MASK)
                return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
        VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
        regs->eflags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
        regs->eflags &= ~AC_MASK;
}

/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 * ( I was testing my own changes, but the only bug I
 *   could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
        set_flags(VEFLAGS, eflags, current->thread.v86mask);
        set_flags(regs->eflags, eflags, SAFE_MASK);
        if (eflags & IF_MASK)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
        set_flags(VFLAGS, flags, current->thread.v86mask);
        set_flags(regs->eflags, flags, SAFE_MASK);
        if (flags & IF_MASK)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
        unsigned long flags = regs->eflags & RETURN_MASK;

        if (VEFLAGS & VIF_MASK)
                flags |= IF_MASK;
        flags |= IOPL_MASK;
        return flags | (VEFLAGS & current->thread.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
        __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
                :"=r" (nr)
                :"m" (*bitmap),"r" (nr));
        return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

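/*
 * The emulated stack and code fetches go through user space one byte
 * at a time via val_byte(): the callers pass a 16-bit ptr (sp or ip),
 * so the increments and decrements wrap at the segment boundary just
 * like a real-mode SP/IP would.  On a fault, the macros jump to the
 * caller-supplied error label instead of returning to userspace (see
 * the changelog at the top of this file).
 */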
#define pushb(base, ptr, val, err_label) \
        do { \
                __u8 __val = val; \
                ptr--; \
                if (put_user(__val, base + ptr) < 0) \
                        goto err_label; \
        } while(0)

#define pushw(base, ptr, val, err_label) \
        do { \
                __u16 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while(0)

#define pushl(base, ptr, val, err_label) \
        do { \
                __u32 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while(0)

#define popb(base, ptr, err_label) \
        ({ \
                __u8 __res; \
                if (get_user(__res, base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popw(base, ptr, err_label) \
        ({ \
                __u16 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popl(base, ptr, err_label) \
        ({ \
                __u32 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

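/*
 * Note that do_int() pushes through a local copy of sp and only
 * commits SP(regs) once every push has succeeded; this is what keeps
 * the emulation in a consistent state when a stack fault hits halfway
 * through.
 */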
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
        unsigned long __user *intr_ptr;
        unsigned long segoffs;

        if (regs->cs == BIOSSEG)
                goto cannot_handle;
        if (is_revectored(i, &KVM86->int_revectored))
                goto cannot_handle;
        if (i==0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
                goto cannot_handle;
        intr_ptr = (unsigned long __user *) (i << 2);
        if (get_user(segoffs, intr_ptr))
                goto cannot_handle;
        if ((segoffs >> 16) == BIOSSEG)
                goto cannot_handle;
        pushw(ssp, sp, get_vflags(regs), cannot_handle);
        pushw(ssp, sp, regs->cs, cannot_handle);
        pushw(ssp, sp, IP(regs), cannot_handle);
        regs->cs = segoffs >> 16;
        SP(regs) -= 6;
        IP(regs) = segoffs & 0xffff;
        clear_TF(regs);
        clear_IF(regs);
        clear_AC(regs);
        return;

cannot_handle:
        return_to_32bit(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
        if (VMPI.is_vm86pus) {
                if ( (trapno==3) || (trapno==1) )
                        return_to_32bit(regs, VM86_TRAP + (trapno << 8));
                do_int(regs, trapno, (unsigned char __user *) (regs->ss << 4), SP(regs));
                return 0;
        }
        if (trapno != 1)
                return 1; /* we let this be handled by the calling routine */
        if (current->ptrace & PT_PTRACED) {
                unsigned long flags;
                spin_lock_irqsave(&current->sighand->siglock, flags);
                sigdelset(&current->blocked, SIGTRAP);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }
        send_sig(SIGTRAP, current, 1);
        current->thread.trap_no = trapno;
        current->thread.error_code = error_code;
        return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
        unsigned char opcode;
        unsigned char __user *csp;
        unsigned char __user *ssp;
        unsigned short ip, sp, orig_flags;
        int data32, pref_done;

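/*
 * CHECK_IF_IN_TRAP keeps a debugger's pending single-step alive: when
 * the vm86 debugger is active with TF pending, the popf/iret emulation
 * must not silently drop TF from the restored flags.  VM86_FAULT_RETURN
 * either resumes the vm86 task, or exits to 32-bit mode with
 * VM86_PICRETURN when the PIC emulation wants control back and
 * interrupts are (virtually) enabled; a TF set in the original flags is
 * delivered as a single-step trap.
 */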
#define CHECK_IF_IN_TRAP \
        if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
                newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
        if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
                return_to_32bit(regs, VM86_PICRETURN); \
        if (orig_flags & TF_MASK) \
                handle_vm86_trap(regs, 0, 1); \
        return; } while (0)

        orig_flags = *(unsigned short *)&regs->eflags;

        csp = (unsigned char __user *) (regs->cs << 4);
        ssp = (unsigned char __user *) (regs->ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        data32 = 0;
        pref_done = 0;
        do {
                switch (opcode = popb(csp, ip, simulate_sigsegv)) {
                        case 0x66:      /* 32-bit data */     data32=1; break;
                        case 0x67:      /* 32-bit address */  break;
                        case 0x2e:      /* CS */              break;
                        case 0x3e:      /* DS */              break;
                        case 0x26:      /* ES */              break;
                        case 0x36:      /* SS */              break;
                        case 0x65:      /* GS */              break;
                        case 0x64:      /* FS */              break;
                        case 0xf2:      /* repnz */           break;
                        case 0xf3:      /* rep */             break;
                        default: pref_done = 1;
                }
        } while (!pref_done);

        switch (opcode) {

        /* pushf */
        case 0x9c:
                if (data32) {
                        pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 4;
                } else {
                        pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 2;
                }
                IP(regs) = ip;
                VM86_FAULT_RETURN;

        /* popf */
        case 0x9d:
                {
                unsigned long newflags;
                if (data32) {
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 4;
                } else {
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 2;
                }
                IP(regs) = ip;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
                } else {
                        set_vflags_short(newflags, regs);
                }
                VM86_FAULT_RETURN;
                }

        /* int xx */
        case 0xcd: {
                int intno = popb(csp, ip, simulate_sigsegv);
                IP(regs) = ip;
                if (VMPI.vm86dbg_active) {
                        if ( (1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
                                return_to_32bit(regs, VM86_INTx + (intno << 8));
                }
                do_int(regs, intno, ssp, sp);
                return;
        }

        /* iret */
        case 0xcf:
                {
                unsigned long newip;
                unsigned long newcs;
                unsigned long newflags;
                if (data32) {
                        newip = popl(ssp, sp, simulate_sigsegv);
                        newcs = popl(ssp, sp, simulate_sigsegv);
                        newflags = popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 12;
                } else {
                        newip = popw(ssp, sp, simulate_sigsegv);
                        newcs = popw(ssp, sp, simulate_sigsegv);
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 6;
                }
                IP(regs) = newip;
                regs->cs = newcs;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
                } else {
                        set_vflags_short(newflags, regs);
                }
                VM86_FAULT_RETURN;
                }

        /* cli */
        case 0xfa:
                IP(regs) = ip;
                clear_IF(regs);
                VM86_FAULT_RETURN;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs) = ip;
                set_IF(regs);
                VM86_FAULT_RETURN;

        default:
                return_to_32bit(regs, VM86_UNKNOWN);
        }

        return;

simulate_sigsegv:
        /* FIXME: After a long discussion with Stas we finally
         *        agreed, that this is wrong. Here we should
         *        really send a SIGSEGV to the user program.
         *        But how do we create the correct context? We
         *        are inside a general protection fault handler
         *        and have just returned from a page fault handler.
         *        The correct context for the signal handler
         *        should be a mixture of the two, but how do we
         *        get the information? [KD]
         */
        return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME            "vm86irq"

static struct vm86_irqs {
        struct task_struct *tsk;
        int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
        | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
        | (1 << SIGUNUSED) )

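/*
 * When a forwarded IRQ fires, the handler latches it in irqbits,
 * optionally signals the owning task, and leaves the IRQ disabled
 * until the task collects it with VM86_GET_AND_RESET_IRQ.
 */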
static irqreturn_t irq_handler(int intno, void *dev_id)
{
        int irq_bit;
        unsigned long flags;

        spin_lock_irqsave(&irqbits_lock, flags);
        irq_bit = 1 << intno;
        if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
                goto out;
        irqbits |= irq_bit;
        if (vm86_irqs[intno].sig)
                send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
        /*
         * IRQ will be re-enabled when user asks for the irq (whether
         * polling or as a result of the signal)
         */
        disable_irq_nosync(intno);
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_HANDLED;

out:
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
        unsigned long flags;

        free_irq(irqnumber, NULL);
        vm86_irqs[irqnumber].tsk = NULL;

        spin_lock_irqsave(&irqbits_lock, flags);
        irqbits &= ~(1 << irqnumber);
        spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
        int i;
        for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
                if (vm86_irqs[i].tsk == task)
                        free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
        int bit;
        unsigned long flags;
        int ret = 0;

        if (invalid_vm86_irq(irqnumber)) return 0;
        if (vm86_irqs[irqnumber].tsk != current) return 0;
        spin_lock_irqsave(&irqbits_lock, flags);
        bit = irqbits & (1 << irqnumber);
        irqbits &= ~bit;
        if (bit) {
                enable_irq(irqnumber);
                ret = 1;
        }

        spin_unlock_irqrestore(&irqbits_lock, flags);
        return ret;
}

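/*
 * VM86_REQUEST_IRQ packs its argument as (signal << 8) | irqnumber;
 * signal 0 means "don't send a signal", leaving the task to poll via
 * VM86_GET_IRQ_BITS.
 */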
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
        int ret;
        switch (subfunction) {
                case VM86_GET_AND_RESET_IRQ: {
                        return get_and_reset_irq(irqnumber);
                }
                case VM86_GET_IRQ_BITS: {
                        return irqbits;
                }
                case VM86_REQUEST_IRQ: {
                        int sig = irqnumber >> 8;
                        int irq = irqnumber & 255;
                        if (!capable(CAP_SYS_ADMIN)) return -EPERM;
                        if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
                        if (invalid_vm86_irq(irq)) return -EPERM;
                        if (vm86_irqs[irq].tsk) return -EPERM;
                        ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
                        if (ret) return ret;
                        vm86_irqs[irq].sig = sig;
                        vm86_irqs[irq].tsk = current;
                        return irq;
                }
                case VM86_FREE_IRQ: {
                        if (invalid_vm86_irq(irqnumber)) return -EPERM;
                        if (!vm86_irqs[irqnumber].tsk) return 0;
                        if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
                        free_vm86_irq(irqnumber);
                        return 0;
                }
        }
        return -EINVAL;
}