/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
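
/*
 * For illustration (hypothetical guest code, not taken from any real
 * program): the classic real-mode stack switch relies on exactly that
 * one-instruction interrupt shadow after "mov ss,xx":
 *
 *	mov	ax, NEW_STACK_SEG
 *	mov	ss, ax			; interrupts held off for one insn
 *	mov	sp, NEW_STACK_OFF	; must complete before any interrupt
 *
 * Under vm86 emulation the "mov sp" may fault and re-enter the kernel,
 * so that shadow cannot be reproduced faithfully.
 */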


#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.eax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.eax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.eip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.esp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS	(current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)

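/*
 * Presumed decoding of the masks above: SAFE_MASK (0xDD5) selects
 * CF|PF|AF|ZF|SF|TF|DF|OF - the flags a vm86 task may change directly.
 * RETURN_MASK (0xDFF) additionally keeps the reserved low bits. Both
 * exclude IF, IOPL and NT, which are virtualized through VEFLAGS and
 * thread.v86mask instead.
 */
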
/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
				  const struct kernel_vm86_regs *regs)
{
	int ret = 0;

	/* kernel_vm86_regs is missing xgs, so copy everything up to
	   (but not including) orig_eax, and then rest including orig_eax. */
	ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_eax));
	ret += copy_to_user(&user->orig_eax, &regs->pt.orig_eax,
			    sizeof(struct kernel_vm86_regs) -
			    offsetof(struct kernel_vm86_regs, pt.orig_eax));

	return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
				    const struct vm86_regs __user *user,
				    unsigned extra)
{
	int ret = 0;

	/* copy eax-xfs inclusive */
	ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_eax));
	/* copy orig_eax-__gsh+extra */
	ret += copy_from_user(&regs->pt.orig_eax, &user->orig_eax,
			      sizeof(struct kernel_vm86_regs) -
			      offsetof(struct kernel_vm86_regs, pt.orig_eax) +
			      extra);
	return ret;
}

struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	unsigned long tmp;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!current->thread.vm86_info) {
		printk("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.eflags, VEFLAGS, VIF_MASK | current->thread.v86mask);
	tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
	tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
	if (tmp) {
		printk("vm86: could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(init_tss, get_cpu());
	current->thread.esp0 = current->thread.saved_esp0;
	current->thread.sysenter_cs = __KERNEL_CS;
	load_esp0(tss, &current->thread);
	current->thread.saved_esp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	ret->xfs = current->thread.saved_fs;
	loadsegment(gs, current->thread.saved_gs);

	return ret;
}

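/*
 * 0xA0000 is the base of the legacy VGA graphics window; the loop below
 * write-protects the 32 PTEs covering 0xA0000-0xBFFFF (128K), so that
 * screen writes from vm86 mode fault and can be tracked through the
 * screen bitmap.
 */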
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	flush_tlb();
}


static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

asmlinkage int sys_vm86old(struct pt_regs regs)
{
	struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.ebx;
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * to avoid wasting stack space.
					 * It remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret = -EPERM;

	tsk = current;
	if (tsk->thread.saved_esp0)
		goto out;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, vm86plus) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
	info.regs32 = &regs;
	tsk->thread.vm86_info = v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}
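
/*
 * For illustration only - a hypothetical userspace caller (not part of
 * this file), assuming __NR_vm86old from <asm/unistd.h>, the structures
 * from <asm/vm86.h>, and guest code already mapped in the low megabyte:
 *
 *	struct vm86_struct v86;
 *	memset(&v86, 0, sizeof(v86));
 *	v86.regs.cs  = 0x1000;		// enter the guest at 1000:0100
 *	v86.regs.eip = 0x0100;
 *	v86.regs.ss  = 0x2000;		// guest stack at 2000:fffe
 *	v86.regs.esp = 0xfffe;
 *	int status = syscall(__NR_vm86old, &v86);
 *	// 'status' carries the VM86_* code (VM86_SIGNAL, VM86_INTx, ...)
 *	// stored in %eax by return_to_32bit() when the guest needs help
 */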


asmlinkage int sys_vm86(struct pt_regs regs)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * to avoid wasting stack space.
					 * It remains on the stack until we
					 * return to 32 bit user space.
					 */
	struct task_struct *tsk;
	int tmp, ret;
	struct vm86plus_struct __user *v86;

	tsk = current;
	switch (regs.ebx) {
		case VM86_REQUEST_IRQ:
		case VM86_FREE_IRQ:
		case VM86_GET_IRQ_BITS:
		case VM86_GET_AND_RESET_IRQ:
			ret = do_vm86_irq_handling(regs.ebx, (int)regs.ecx);
			goto out;
		case VM86_PLUS_INSTALL_CHECK:
			/* NOTE: on old vm86 stuff this will return the error
			   from access_ok(), because the subfunction is
			   interpreted as an (invalid) address of a vm86_struct.
			   So the installation check works.
			 */
			ret = 0;
			goto out;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	ret = -EPERM;
	if (tsk->thread.saved_esp0)
		goto out;
	v86 = (struct vm86plus_struct __user *)regs.ecx;
	tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
				       offsetof(struct kernel_vm86_struct, regs32) -
				       sizeof(info.regs));
	ret = -EFAULT;
	if (tmp)
		goto out;
	info.regs32 = &regs;
	info.vm86plus.is_vm86pus = 1;
	tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
	do_sys_vm86(&info, tsk);
	ret = 0;	/* we never return here */
out:
	return ret;
}


static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
	struct tss_struct *tss;
	/*
	 * make sure the vm86() system call doesn't try to do anything silly
	 */
	info->regs.pt.xds = 0;
	info->regs.pt.xes = 0;
	info->regs.pt.xfs = 0;

	/* we are clearing gs later just before "jmp resume_userspace",
	 * because it is not saved/restored.
	 */

	/*
	 * The eflags register is also special: we cannot trust that the user
	 * has set it up safely, so this makes sure interrupt etc flags are
	 * inherited from protected mode.
	 */
	VEFLAGS = info->regs.pt.eflags;
	info->regs.pt.eflags &= SAFE_MASK;
	info->regs.pt.eflags |= info->regs32->eflags & ~SAFE_MASK;
	info->regs.pt.eflags |= VM_MASK;

	switch (info->cpu_type) {
		case CPU_286:
			tsk->thread.v86mask = 0;
			break;
		case CPU_386:
			tsk->thread.v86mask = NT_MASK | IOPL_MASK;
			break;
		case CPU_486:
			tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
			break;
		default:
			tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
			break;
	}

	/*
	 * Save old state, set default return value (%eax) to 0
	 */
	info->regs32->eax = 0;
	tsk->thread.saved_esp0 = tsk->thread.esp0;
	tsk->thread.saved_fs = info->regs32->xfs;
	savesegment(gs, tsk->thread.saved_gs);

	tss = &per_cpu(init_tss, get_cpu());
	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_esp0(tss, &tsk->thread);
	put_cpu();

	tsk->thread.screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call audit_syscall_exit since we do not exit via the normal paths */
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(0), 0);

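	/*
	 * Point %esp at the kernel_vm86_regs frame built above and take
	 * the common 32-bit exit path: the IRET at the end of it restores
	 * that frame with VM_MASK set in eflags, dropping the CPU into
	 * virtual-8086 mode.
	 */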
	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"mov %2, %%gs\n\t"
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	/* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
	struct pt_regs * regs32;

	regs32 = save_v86_state(regs16);
	regs32->eax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS |= VIF_MASK;
	if (VEFLAGS & VIP_MASK)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
	VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
	regs->pt.eflags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
	regs->pt.eflags &= ~AC_MASK;
}

/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 *  ( I was testing my own changes, but the only bug I
 *    could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long eflags, struct kernel_vm86_regs * regs)
{
	set_flags(VEFLAGS, eflags, current->thread.v86mask);
	set_flags(regs->pt.eflags, eflags, SAFE_MASK);
	if (eflags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
	set_flags(VFLAGS, flags, current->thread.v86mask);
	set_flags(regs->pt.eflags, flags, SAFE_MASK);
	if (flags & IF_MASK)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
	unsigned long flags = regs->pt.eflags & RETURN_MASK;

	if (VEFLAGS & VIF_MASK)
		flags |= IF_MASK;
	flags |= IOPL_MASK;
	return flags | (VEFLAGS & current->thread.v86mask);
}

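/*
 * Returns nonzero (-1) when interrupt 'nr' is marked in the revector
 * bitmap: btl copies bit 'nr' of the bitmap into the carry flag, and
 * "sbbl %0,%0" broadcasts the carry into every bit of the result.
 */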
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap),"r" (nr));
	return nr;
}

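/*
 * The emulated stack and instruction fetches below go byte by byte on
 * purpose: 'ptr' is a 16-bit quantity, so each increment or decrement
 * wraps within the 64K segment exactly as a real-mode push/pop would.
 */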
#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while(0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.xcs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i==0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.xcs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.xcs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ( (trapno==3) || (trapno==1) )
			return_to_32bit(regs, VM86_TRAP + (trapno << 8));
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.xss << 4), SP(regs));
		return 0;
	}
	if (trapno !=1)
		return 1; /* we let this be handled by the calling routine */
	if (current->ptrace & PT_PTRACED) {
		unsigned long flags;
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->blocked, SIGTRAP);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}
	send_sig(SIGTRAP, current, 1);
	current->thread.trap_no = trapno;
	current->thread.error_code = error_code;
	return 0;
}

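/*
 * Called from the general protection fault handler while the faulting
 * task is in vm86 mode: decode any instruction prefixes, then emulate
 * the one privileged instruction (pushf/popf/int/iret/cli/sti) that
 * trapped.
 */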
void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & TF_MASK) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.eflags;

	csp = (unsigned char __user *) (regs->pt.xcs << 4);
	ssp = (unsigned char __user *) (regs->pt.xss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
			case 0x66:      /* 32-bit data */     data32 = 1; break;
			case 0x67:      /* 32-bit address */  break;
			case 0x2e:      /* CS */              break;
			case 0x3e:      /* DS */              break;
			case 0x26:      /* ES */              break;
			case 0x36:      /* SS */              break;
			case 0x65:      /* GS */              break;
			case 0x64:      /* FS */              break;
			case 0xf2:      /* repnz */           break;
			case 0xf3:      /* rep */             break;
			default: pref_done = 1;
		}
	} while (!pref_done);
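	/*
	 * Note: only the operand-size prefix (0x66) changes how the
	 * opcodes below are emulated; the remaining prefixes are consumed
	 * and ignored, since none of the emulated instructions take a
	 * memory operand that a segment override could affect.
	 */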

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
	{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
	}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
	{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.xcs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
	}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 * agreed, that this is wrong. Here we should
	 * really send a SIGSEGV to the user program.
	 * But how do we create the correct context? We
	 * are inside a general protection fault handler
	 * and have just returned from a page fault handler.
	 * The correct context for the signal handler
	 * should be a mixture of the two, but how do we
	 * get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

#define VM86_IRQNAME	"vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED) )

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}
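
/*
 * For illustration only - a hypothetical userspace sequence driving the
 * subfunctions above (assumes the VM86_* constants from <asm/vm86.h>,
 * __NR_vm86 from <asm/unistd.h>, and CAP_SYS_ADMIN; error handling
 * omitted):
 *
 *	// request SIGUSR1 on IRQ 5; the encoding is (sig << 8) | irq
 *	syscall(__NR_vm86, VM86_REQUEST_IRQ, (SIGUSR1 << 8) | 5);
 *	...
 *	// poll/acknowledge: nonzero means it fired, and re-enables the IRQ
 *	if (syscall(__NR_vm86, VM86_GET_AND_RESET_IRQ, 5))
 *		... handle the pending interrupt ...
 *	syscall(__NR_vm86, VM86_FREE_IRQ, 5);
 */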