/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/syscalls.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */


#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

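/*
 * Illustrative note (not from the original source): these macros alias
 * the low 8/16 bits of the 32-bit pt_regs members, relying on the x86
 * little-endian layout. A minimal sketch of what they read and write:
 *
 *	regs->pt.ax = 0x00001234;
 *	AL(regs);		// 0x34 - lowest byte of eax
 *	AH(regs);		// 0x12 - second byte of eax
 *	IP(regs) = 0x0100;	// updates only the low 16 bits of eip
 */
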
/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

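/*
 * Illustrative note (not from the original source): set_flags() keeps
 * the bits of X outside 'mask' and takes the bits inside 'mask' from
 * 'new'. SAFE_MASK (0xDD5) selects CF, PF, AF, ZF, SF, TF, DF and OF;
 * both masks deliberately exclude IF (0x200), which is emulated via
 * VIF and never written through to the real EFLAGS. For example:
 *
 *	unsigned long x = 0x202;	// IF set, arithmetic flags clear
 *	set_flags(x, 0xD5, SAFE_MASK);	// x == 0x2D7: IF untouched,
 *					// CF/PF/AF/ZF/SF taken from new
 */
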
/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/*
	 * kernel_vm86_regs is missing gs, so copy everything up to
	 * (but not including) orig_eax, and then the rest, including
	 * orig_eax.
	 */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_ax));

	return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	/* copy ax-fs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	/* copy orig_ax-__gsh+extra */
	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
			      extra);
	return ret;
}

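/*
 * Illustrative sketch (not from the original source) of why the copy is
 * split in two: userspace vm86_regs carries a __null_gs slot just before
 * orig_eax that the pt_regs inside kernel_vm86_regs lacks, so the two
 * structs only line up field-by-field up to fs. Roughly:
 *
 *	vm86_regs:        ebx ... __null_fs  __null_gs  orig_eax  eip ...
 *	kernel_vm86_regs: bx  ... fs                    orig_ax   ip  ...
 *
 * Hence one copy for [0, orig_ax) and a second one from orig_ax onward;
 * 'extra' extends the second copy to pull in the vm86plus tail when the
 * caller asks for it.
 */
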
struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.sp0 = current->thread.saved_sp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &current->thread);
	current->thread.saved_sp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	ret->fs = current->thread.saved_fs;
	set_user_gs(ret, current->thread.saved_gs);

	return ret;
}

static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}

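/*
 * Illustrative note (not from the original source): 0xA0000 is the start
 * of the legacy VGA graphics window, and the loop above write-protects
 * 32 PTEs, i.e. 32 * 4 KiB = 128 KiB, covering 0xA0000-0xBFFFF. The
 * idea is that a write to the screen then faults, so dirty screen pages
 * can be tracked for the VM86_SCREEN_BITMAP feature (screen_bitmap).
 */
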
static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32-bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_sp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}

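/*
 * Illustrative userspace sketch (not part of this file; names per the
 * vm86(2)/vm86old interface, error handling omitted): entering v86 mode
 * with the old interface means filling in a vm86_struct and expecting
 * control back only via a fault, interrupt, or signal, with the reason
 * encoded in the syscall's return value (%ax = VM86_*).
 *
 *	struct vm86_struct vm;
 *	memset(&vm, 0, sizeof(vm));
 *	vm.regs.cs  = 0x1000;	// real-mode entry point 0x1000:0x0000
 *	vm.regs.eip = 0x0000;
 *	vm.regs.ss  = 0x2000;	// real-mode stack 0x2000:0xFFFE
 *	vm.regs.esp = 0xFFFE;
 *	syscall(SYS_vm86old, &vm);
 */
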

int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32-bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(cmd, (int)arg);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		ret = 0;
		goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_sp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)arg;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}

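/*
 * Illustrative userspace sketch (not part of this file): the NOTE above
 * is what makes runtime detection of vm86plus possible. On a vm86plus
 * kernel the subfunction returns 0; on a plain-vm86 kernel the small
 * integer is taken as a (bad) vm86_struct pointer and the call fails.
 *
 *	int have_vm86plus =
 *		(syscall(SYS_vm86, VM86_PLUS_INSTALL_CHECK, 0) == 0);
 */
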

static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
	/*
	 * make sure the vm86() system call doesn't try to do anything silly
	 */
	info->regs.pt.ds = 0;
	info->regs.pt.es = 0;
	info->regs.pt.fs = 0;
#ifndef CONFIG_X86_32_LAZY_GS
	info->regs.pt.gs = 0;
#endif

	/*
	 * The flags register is also special: we cannot trust that the user
	 * has set it up safely, so this makes sure interrupt etc flags are
	 * inherited from protected mode.
	 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;

	switch (info->cpu_type) {
	case CPU_286:
		tsk->thread.v86mask = 0;
		break;
	case CPU_386:
		tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

	/*
	 * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
	 */
	info->regs32->ax = VM86_SIGNAL;
	tsk->thread.saved_sp0 = tsk->thread.sp0;
	tsk->thread.saved_fs = info->regs32->fs;
	tsk->thread.saved_gs = get_user_gs(info->regs32);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call __audit_syscall_exit since we do not exit via the normal paths */
#ifdef CONFIG_AUDITSYSCALL
	if (unlikely(current->audit_context))
		__audit_syscall_exit(1, 0);
#endif

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
		"mov  %2, %%gs\n\t"
#endif
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}

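/*
 * Illustrative worked example (not from the original source) of the
 * flags sanitizing in do_sys_vm86(): suppose userspace asks for
 * flags == 0x202 (IF set) while the 32-bit kernel frame has
 * flags == 0x246. Then:
 *
 *	VEFLAGS  = 0x202;			// user's IF lives here, virtualized
 *	pt.flags = (0x202 & SAFE_MASK)		// 0x000: user IF never reaches EFLAGS
 *		 | (0x246 & ~SAFE_MASK)		// 0x202: real IF inherited from the kernel
 *		 | X86_VM_MASK;			// 0x20000: enter v86 mode
 */
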
static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
	struct pt_regs *regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
	if (VEFLAGS & X86_EFLAGS_VIP)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However, someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

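/*
 * Illustrative trace (not from the original source) of the sequence
 * described above, as emulated here:
 *
 *	CLI   -> clear_IF():   VEFLAGS.VIF = 0
 *	PUSHF -> get_vflags(): pushes flags with IF = VIF = 0
 *	STI   -> set_IF():     VEFLAGS.VIF = 1
 *	POPF  -> set_vflags_*() sees IF = 0 in the popped value and must
 *		 call clear_IF() - the bug was that only the set_IF()
 *		 half existed, leaving interrupts enabled.
 */
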
static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.v86mask);
}

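/*
 * Illustrative worked example (not from the original source): with
 * pt.flags == 0x20246 (VM and real IF set) and VEFLAGS.VIF clear,
 * get_vflags() computes
 *
 *	flags  = 0x20246 & RETURN_MASK;	// 0x0046: real IF and VM stripped
 *					// (VIF clear, so no IF reported)
 *	flags |= X86_EFLAGS_IOPL;	// v86 code always sees IOPL = 3
 *	return flags | (VEFLAGS & current->thread.v86mask);
 */
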
static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}

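/*
 * Minimal C equivalent (an illustrative sketch, not from the original
 * source, assuming the unsigned long __map[8] layout of
 * struct revectored_struct): test bit 'nr' in the 256-bit revectoring
 * bitmap. The sbbl above turns the carry into 0 or all-ones, so any
 * nonzero return means "revectored".
 *
 *	static inline int is_revectored_c(int nr, struct revectored_struct *bitmap)
 *	{
 *		return (bitmap->__map[nr >> 5] >> (nr & 31)) & 1;
 *	}
 */
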
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

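/*
 * Illustrative note (not from the original source): the push/pop macros
 * work a byte at a time so that the 16-bit 'ptr' offset into 'base'
 * wraps at the 64 KiB segment boundary the way real-mode stack accesses
 * do; the caller updates SP(regs) itself afterwards. For example,
 * pushw(ssp, sp, 0x1234, err) stores:
 *
 *	ssp[--sp] = 0x12;	// high byte first, at the higher address
 *	ssp[--sp] = 0x34;	// then the low byte - little-endian in memory
 */
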
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

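/*
 * Illustrative worked example (not from the original source): for
 * 'int 0x21' with a handler installed at 0x0100:0x0050, do_int()
 * performs the real-mode IVT dispatch by hand:
 *
 *	intr_ptr = (unsigned long __user *)(0x21 << 2);	// IVT entry at 0x84
 *	segoffs  = 0x01000050;			// CS:IP read from that entry
 *	// pushw FLAGS, CS, IP (6 bytes), then SP(regs) -= 6
 *	regs->pt.cs = 0x0100;
 *	IP(regs)    = 0x0050;
 */
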
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
			/* setting this flag forces the code in entry_32.S to
			   call save_v86_state() and change the stack pointer
			   to KVM86->regs32 */
			set_thread_flag(TIF_IRET);
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:      /* 32-bit data */     data32 = 1; break;
		case 0x67:      /* 32-bit address */  break;
		case 0x2e:      /* CS */              break;
		case 0x3e:      /* DS */              break;
		case 0x26:      /* ES */              break;
		case 0x36:      /* SS */              break;
		case 0x65:      /* GS */              break;
		case 0x64:      /* FS */              break;
		case 0xf2:      /* repnz */           break;
		case 0xf3:      /* rep */             break;
		default: pref_done = 1;
		}
	} while (!pref_done);

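	/*
	 * Illustrative note (not from the original source): after the
	 * prefix scan, 'ip' points past the prefixes and 'opcode' holds
	 * the first non-prefix byte. E.g. for the byte sequence 66 9c
	 * (operand-size prefix + pushf), data32 == 1 and the pushf case
	 * below pushes a 32-bit flags image and adjusts SP by 4.
	 */
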
	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		VM86_FAULT_RETURN;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed, that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS	(1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
		if (invalid_vm86_irq(irq)) return -EPERM;
		if (vm86_irqs[irq].tsk) return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret) return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber)) return -EPERM;
		if (!vm86_irqs[irqnumber].tsk) return 0;
		if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}
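
/*
 * Illustrative userspace sketch (not part of this file; error handling
 * omitted): routing a hardware IRQ to a vm86 monitor via the interface
 * above. Requires CAP_SYS_ADMIN, a signal from ALLOWED_SIGS, and a
 * vm86 IRQ number in the FIRST_VM86_IRQ..LAST_VM86_IRQ range.
 *
 *	int irq = syscall(SYS_vm86, VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 5);
 *	...
 *	if (syscall(SYS_vm86, VM86_GET_AND_RESET_IRQ, irq))
 *		;	// IRQ 5 fired since we last looked; re-enabled now
 *	syscall(SYS_vm86, VM86_FREE_IRQ, irq);
 */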
847 |