/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stack faults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stack faults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>
#include <asm/syscalls.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
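
/*
 * Added commentary: the classic 16-bit idiom that depends on the
 * "mov ss,xx" interrupt shadow looks like the sketch below. On real
 * hardware the CPU holds off interrupts for one instruction, so ss:sp
 * is never observed half-switched; under v86 emulation a page fault
 * can land between the two moves:
 *
 *	mov	ax, new_stack_seg
 *	mov	ss, ax
 *	mov	sp, new_stack_off
 */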

#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))
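/*
 * Added commentary: these aliases rely on x86 being little-endian;
 * byte 0 of pt.ax is AL and byte 1 is AH, and the low 16 bits of
 * pt.ip/pt.sp are the real-mode IP and SP.
 */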

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X, new, mask) \
	((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
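
/*
 * Added commentary: SAFE_MASK (0xDD5) = CF|PF|AF|ZF|SF|TF|DF|OF, the
 * flags vm86 userspace may modify directly.  RETURN_MASK (0xDFF) also
 * keeps the fixed/reserved bits 1, 3 and 5.  Neither includes IF
 * (bit 9): get_vflags() below reconstructs IF from the virtual VIF.
 */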

/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/*
	 * kernel_vm86_regs is missing gs, so copy everything up to
	 * (but not including) orig_eax, and then rest including orig_eax.
	 */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_ax));

	return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	/* copy ax-fs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
	/* copy orig_ax-__gsh + extra */
	ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_ax) +
			      extra);
	return ret;
}

struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.sp0 = current->thread.saved_sp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &current->thread);
	current->thread.saved_sp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	ret->fs = current->thread.saved_fs;
	set_user_gs(ret, current->thread.saved_gs);

	return ret;
}

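/*
 * Added commentary: for VM86_SCREEN_BITMAP the 32 pages starting at
 * 0xA0000 (the legacy VGA window, 32 * 4 KiB = 128 KiB) are made
 * read-only here; the first write to each page then faults, letting
 * the page fault path record which parts of the screen the vm86 task
 * touched in thread.screen_bitmap.
 */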
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	split_huge_page_pmd(mm, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb();
}


static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_sp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}

int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting of stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		ret = do_vm86_irq_handling(cmd, (int)arg);
		goto out;
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
		ret = 0;
		goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_sp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)arg;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}

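/*
 * Illustrative sketch (added commentary, not from this file): a vm86
 * monitor such as dosemu would enter v86 mode through the glibc
 * vm86() wrapper roughly like this; the register image setup here is
 * only an example:
 *
 *	struct vm86plus_struct v86 = { 0 };
 *	v86.regs.cs  = 0x1000;  v86.regs.eip = 0x0000;
 *	v86.regs.ss  = 0x9000;  v86.regs.esp = 0xfffe;
 *	int status = vm86(VM86_ENTER, &v86);
 *	// VM86_TYPE(status) / VM86_ARG(status) decode why it returned
 */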

static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
	info->regs.pt.ds = 0;
	info->regs.pt.es = 0;
	info->regs.pt.fs = 0;
#ifndef CONFIG_X86_32_LAZY_GS
	info->regs.pt.gs = 0;
#endif

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc. flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;

	switch (info->cpu_type) {
	case CPU_286:
		tsk->thread.v86mask = 0;
		break;
	case CPU_386:
		tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

/*
 * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
 */
	info->regs32->ax = VM86_SIGNAL;
	tsk->thread.saved_sp0 = tsk->thread.sp0;
	tsk->thread.saved_fs = info->regs32->fs;
	tsk->thread.saved_gs = get_user_gs(info->regs32);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call __audit_syscall_exit() since we do not exit via the normal paths */
#ifdef CONFIG_AUDITSYSCALL
	if (unlikely(current->audit_context))
		__audit_syscall_exit(1, 0);
#endif

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
		"mov %2, %%gs\n\t"
#endif
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}

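/*
 * Added commentary: the asm at the end of do_sys_vm86() points the
 * kernel stack at info->regs and jumps into the common syscall-exit
 * path, so the eventual IRET pops the vm86 register image (with VM
 * set in EFLAGS) instead of the 32-bit frame the task entered with;
 * this is why do_sys_vm86() never returns to its caller.
 */
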
static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
	struct pt_regs *regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
	if (VEFLAGS & X86_EFLAGS_VIP)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

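/*
 * Added commentary: the guest's interrupt flag lives in VEFLAGS as
 * VIF; the real EFLAGS.IF is never handed to vm86 code.  VIP in the
 * virtual flags means the monitor has an interrupt pending, so the
 * moment the guest re-enables interrupts while VIP is set, set_IF()
 * bails out to 32-bit userspace with VM86_STI.
 */
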
/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 * (I was testing my own changes, but the only bug I
 * could find was in a function I had not changed.) [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}

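/*
 * Added commentary: "btl" copies bit 'nr' of the bitmap into CF, and
 * "sbbl %0,%0" turns CF into 0 (bit clear) or -1 (bit set), so the
 * function returns nonzero iff interrupt 'nr' is revectored.
 */
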
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

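/*
 * Added commentary: the multi-byte push/pop helpers go one byte at a
 * time on purpose.  'ptr' is a 16-bit value, so the decrements and
 * increments wrap inside the 64 KiB segment exactly like a real-mode
 * SP would, instead of running past the segment boundary.
 */
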
/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

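/*
 * Added commentary: do_int() reflects a software interrupt into the
 * guest the way a real CPU in real mode would: vector i lives at
 * linear address i * 4 in the interrupt vector table (low word =
 * offset, high word = segment), and FLAGS, CS and IP are pushed on
 * the guest stack before control transfers to that segment:offset.
 */
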
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
			/* setting this flag forces the code in entry_32.S to
			   call save_v86_state() and change the stack pointer
			   to KVM86->regs32 */
			set_thread_flag(TIF_IRET);
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}

void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:	/* 32-bit data */	data32 = 1; break;
		case 0x67:	/* 32-bit address */	break;
		case 0x2e:	/* CS */		break;
		case 0x3e:	/* DS */		break;
		case 0x26:	/* ES */		break;
		case 0x36:	/* SS */		break;
		case 0x65:	/* GS */		break;
		case 0x64:	/* FS */		break;
		case 0xf2:	/* repnz */		break;
		case 0xf3:	/* rep */		break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		VM86_FAULT_RETURN;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 * agreed, that this is wrong. Here we should
	 * really send a SIGSEGV to the user program.
	 * But how do we create the correct context? We
	 * are inside a general protection fault handler
	 * and have just returned from a page fault handler.
	 * The correct context for the signal handler
	 * should be a mixture of the two, but how do we
	 * get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

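/*
 * Added commentary: handle_vm86_fault() above is the #GP entry point
 * for privileged instructions executed in v86 mode.  Its do/while
 * loop first consumes instruction prefixes (operand/address size,
 * segment overrides, rep), then the switch emulates the trapping
 * opcode (pushf/popf/int xx/iret/cli/sti) against the task's virtual
 * flags, so guest code never touches the real EFLAGS.IF.
 */
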
/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

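/*
 * Added commentary: for VM86_REQUEST_IRQ the 'irqnumber' argument is
 * packed as (signal << 8) | irq.  When the IRQ fires, the handler
 * below notes it in 'irqbits', sends the chosen signal to the owning
 * task and masks the line until userspace collects it with
 * VM86_GET_AND_RESET_IRQ.
 */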
static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber))
		return 0;
	if (vm86_irqs[irqnumber].tsk != current)
		return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}

static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
	case VM86_GET_AND_RESET_IRQ: {
		return get_and_reset_irq(irqnumber);
	}
	case VM86_GET_IRQ_BITS: {
		return irqbits;
	}
	case VM86_REQUEST_IRQ: {
		int sig = irqnumber >> 8;
		int irq = irqnumber & 255;
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!((1 << sig) & ALLOWED_SIGS))
			return -EPERM;
		if (invalid_vm86_irq(irq))
			return -EPERM;
		if (vm86_irqs[irq].tsk)
			return -EPERM;
		ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
		if (ret)
			return ret;
		vm86_irqs[irq].sig = sig;
		vm86_irqs[irq].tsk = current;
		return irq;
	}
	case VM86_FREE_IRQ: {
		if (invalid_vm86_irq(irqnumber))
			return -EPERM;
		if (!vm86_irqs[irqnumber].tsk)
			return 0;
		if (vm86_irqs[irqnumber].tsk != current)
			return -EPERM;
		free_vm86_irq(irqnumber);
		return 0;
	}
	}
	return -EINVAL;
}