/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */

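/*
 * The vm86 register frame lives at the base of a kernel_vm86_struct
 * (regs is its first member), so the fault paths below can cast a
 * kernel_vm86_regs pointer back to the enclosing structure.
 */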
#define KVM86   ((struct kernel_vm86_struct *)regs)
#define VMPI    KVM86->vm86plus

/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)        (((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)        (((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)        (*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)        (*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS  (*(unsigned short *)&(current->thread.v86flags))
#define VEFLAGS (current->thread.v86flags)

#define set_flags(X,new,mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

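/*
 * SAFE_MASK covers the flag bits a vm86 task may change directly
 * (CF, PF, AF, ZF, SF, TF, DF, OF); IF, IOPL, NT and the high bits
 * are virtualized or forced by the monitor.  RETURN_MASK also lets
 * the fixed reserved bits 1, 3 and 5 through when flags are read back.
 */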
#define SAFE_MASK       (0xDD5)
#define RETURN_MASK     (0xDFF)

/* convert kernel_vm86_regs to vm86_regs */
static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
                                  const struct kernel_vm86_regs *regs)
{
        int ret = 0;

        /* kernel_vm86_regs is missing gs, so copy everything up to
           (but not including) orig_eax, and then rest including orig_eax. */
        ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
        ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
                            sizeof(struct kernel_vm86_regs) -
                            offsetof(struct kernel_vm86_regs, pt.orig_ax));

        return ret;
}

/* convert vm86_regs to kernel_vm86_regs */
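/* The 'extra' byte count lets callers pull in the fields that follow the
   registers (flags, screen_bitmap, cpu_type, the revectoring bitmaps)
   with the same copy, since vm86_struct and kernel_vm86_struct lay those
   fields out identically. */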
static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
                                    const struct vm86_regs __user *user,
                                    unsigned extra)
{
        int ret = 0;

        /* copy ax-fs inclusive */
        ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
        /* copy orig_ax-__gsh+extra */
        ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
                              sizeof(struct kernel_vm86_regs) -
                              offsetof(struct kernel_vm86_regs, pt.orig_ax) +
                              extra);
        return ret;
}

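/*
 * Leave vm86 mode: copy the emulated register state back to the user's
 * vm86_info, restore the saved kernel stack pointer and segments, and
 * hand back the 32-bit pt_regs frame that the caller resumes through.
 */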
struct pt_regs * save_v86_state(struct kernel_vm86_regs * regs)
{
        struct tss_struct *tss;
        struct pt_regs *ret;
        unsigned long tmp;

        /*
         * This gets called from entry.S with interrupts disabled, but
         * from process context. Enable interrupts here, before trying
         * to access user space.
         */
        local_irq_enable();

        if (!current->thread.vm86_info) {
                printk("no vm86_info: BAD\n");
                do_exit(SIGSEGV);
        }
        set_flags(regs->pt.flags, VEFLAGS, VIF_MASK | current->thread.v86mask);
        tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs,regs);
        tmp += put_user(current->thread.screen_bitmap,&current->thread.vm86_info->screen_bitmap);
        if (tmp) {
                printk("vm86: could not access userspace vm86_info\n");
                do_exit(SIGSEGV);
        }

        tss = &per_cpu(init_tss, get_cpu());
        current->thread.sp0 = current->thread.saved_sp0;
        current->thread.sysenter_cs = __KERNEL_CS;
        load_sp0(tss, &current->thread);
        current->thread.saved_sp0 = 0;
        put_cpu();

        ret = KVM86->regs32;

        ret->fs = current->thread.saved_fs;
        loadsegment(gs, current->thread.saved_gs);

        return ret;
}

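/*
 * Write-protect the 32 PTEs mapping the VGA window at 0xA0000-0xBFFFF
 * (32 pages = 128K).  With VM86_SCREEN_BITMAP set, the resulting write
 * faults let the kernel track which screen pages the vm86 task touches.
 */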
static void mark_screen_rdonly(struct mm_struct *mm)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        spinlock_t *ptl;
        int i;

        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
        pud = pud_offset(pgd, 0xA0000);
        if (pud_none_or_clear_bad(pud))
                goto out;
        pmd = pmd_offset(pud, 0xA0000);
        if (pmd_none_or_clear_bad(pmd))
                goto out;
        pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
        for (i = 0; i < 32; i++) {
                if (pte_present(*pte))
                        set_pte(pte, pte_wrprotect(*pte));
                pte++;
        }
        pte_unmap_unlock(pte, ptl);
out:
        flush_tlb();
}


static int do_vm86_irq_handling(int subfunction, int irqnumber);
static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);

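/*
 * The original vm86() entry point: %ebx carries a pointer to a plain
 * vm86_struct.  The pt_regs argument is passed by value so that, with
 * the asmlinkage convention, it aliases the caller's saved register
 * frame on the kernel stack.
 */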
asmlinkage int sys_vm86old(struct pt_regs regs)
{
        struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
        int tmp, ret = -EPERM;

        tsk = current;
        if (tsk->thread.saved_sp0)
                goto out;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, vm86plus) -
                                       sizeof(info.regs));
        ret = -EFAULT;
        if (tmp)
                goto out;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
        info.regs32 = &regs;
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
        ret = 0;        /* we never return here */
out:
        return ret;
}


asmlinkage int sys_vm86(struct pt_regs regs)
{
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
                                         * return to 32 bit user space.
                                         */
        struct task_struct *tsk;
        int tmp, ret;
        struct vm86plus_struct __user *v86;

        tsk = current;
        switch (regs.bx) {
                case VM86_REQUEST_IRQ:
                case VM86_FREE_IRQ:
                case VM86_GET_IRQ_BITS:
                case VM86_GET_AND_RESET_IRQ:
                        ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
                        goto out;
                case VM86_PLUS_INSTALL_CHECK:
                        /* NOTE: on old vm86 stuff this will return the error
                           from access_ok(), because the subfunction is
                           interpreted as (invalid) address to vm86_struct.
                           So the installation check works.
                         */
                        ret = 0;
                        goto out;
        }

        /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
        ret = -EPERM;
        if (tsk->thread.saved_sp0)
                goto out;
        v86 = (struct vm86plus_struct __user *)regs.cx;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, regs32) -
                                       sizeof(info.regs));
        ret = -EFAULT;
        if (tmp)
                goto out;
        info.regs32 = &regs;
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
        ret = 0;        /* we never return here */
out:
        return ret;
}


static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
        struct tss_struct *tss;
/*
 * make sure the vm86() system call doesn't try to do anything silly
 */
        info->regs.pt.ds = 0;
        info->regs.pt.es = 0;
        info->regs.pt.fs = 0;

        /* we are clearing gs later just before "jmp resume_userspace",
         * because it is not saved/restored.
         */

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
        VEFLAGS = info->regs.pt.flags;
        info->regs.pt.flags &= SAFE_MASK;
        info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
        info->regs.pt.flags |= VM_MASK;

        switch (info->cpu_type) {
                case CPU_286:
                        tsk->thread.v86mask = 0;
                        break;
                case CPU_386:
                        tsk->thread.v86mask = NT_MASK | IOPL_MASK;
                        break;
                case CPU_486:
                        tsk->thread.v86mask = AC_MASK | NT_MASK | IOPL_MASK;
                        break;
                default:
                        tsk->thread.v86mask = ID_MASK | AC_MASK | NT_MASK | IOPL_MASK;
                        break;
        }

/*
 * Save old state, set default return value (%ax) to 0
 */
        info->regs32->ax = 0;
        tsk->thread.saved_sp0 = tsk->thread.sp0;
        tsk->thread.saved_fs = info->regs32->fs;
        savesegment(gs, tsk->thread.saved_gs);

        tss = &per_cpu(init_tss, get_cpu());
        tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
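        /*
         * SYSENTER cannot be used from vm86 mode; zeroing sysenter_cs
         * (and hence, on the next context load, MSR_IA32_SYSENTER_CS)
         * makes any attempt fault instead.  save_v86_state() restores
         * __KERNEL_CS on the way out.
         */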
        if (cpu_has_sep)
                tsk->thread.sysenter_cs = 0;
        load_sp0(tss, &tsk->thread);
        put_cpu();

        tsk->thread.screen_bitmap = info->screen_bitmap;
        if (info->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);

        /*call audit_syscall_exit since we do not exit via the normal paths */
        if (unlikely(current->audit_context))
                audit_syscall_exit(AUDITSC_RESULT(0), 0);

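        /*
         * Switch %esp to the kernel_vm86_regs frame built above and jump
         * into the common exit path; the IRET there restores that frame,
         * and because VM_MASK is set in its flags the CPU drops into
         * virtual-8086 mode.  %gs is cleared here because it is not part
         * of the saved/restored vm86 state.
         */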
        __asm__ __volatile__(
                "movl %0,%%esp\n\t"
                "movl %1,%%ebp\n\t"
                "mov  %2, %%gs\n\t"
                "jmp resume_userspace"
                : /* no outputs */
                :"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
        /* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs * regs16, int retval)
{
        struct pt_regs * regs32;

        regs32 = save_v86_state(regs16);
        regs32->ax = retval;
        __asm__ __volatile__("movl %0,%%esp\n\t"
                "movl %1,%%ebp\n\t"
                "jmp resume_userspace"
                : : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs * regs)
{
        VEFLAGS |= VIF_MASK;
        if (VEFLAGS & VIP_MASK)
                return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs * regs)
{
        VEFLAGS &= ~VIF_MASK;
}

static inline void clear_TF(struct kernel_vm86_regs * regs)
{
        regs->pt.flags &= ~TF_MASK;
}

static inline void clear_AC(struct kernel_vm86_regs * regs)
{
        regs->pt.flags &= ~AC_MASK;
}

/* It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 * ( I was testing my own changes, but the only bug I
 *   could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs * regs)
{
        set_flags(VEFLAGS, flags, current->thread.v86mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & IF_MASK)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs * regs)
{
        set_flags(VFLAGS, flags, current->thread.v86mask);
        set_flags(regs->pt.flags, flags, SAFE_MASK);
        if (flags & IF_MASK)
                set_IF(regs);
        else
                clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs * regs)
{
        unsigned long flags = regs->pt.flags & RETURN_MASK;

        if (VEFLAGS & VIF_MASK)
                flags |= IF_MASK;
        flags |= IOPL_MASK;
        return flags | (VEFLAGS & current->thread.v86mask);
}

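/*
 * Test bit 'nr' in a revectoring bitmap: btl copies the bit into CF,
 * and sbbl turns CF into 0 or -1, so the result is nonzero exactly
 * when interrupt 'nr' is revectored back to the 32-bit monitor.
 */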
static inline int is_revectored(int nr, struct revectored_struct * bitmap)
{
        __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
                :"=r" (nr)
                :"m" (*bitmap),"r" (nr));
        return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

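/*
 * The vm86 stack helpers below move one byte at a time through
 * put_user()/get_user(): 'ptr' is a 16-bit quantity in the callers, so
 * the pointer arithmetic wraps inside the 64K segment the way real-mode
 * hardware does, and any fault bails out to err_label.
 */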
#define pushb(base, ptr, val, err_label) \
        do { \
                __u8 __val = val; \
                ptr--; \
                if (put_user(__val, base + ptr) < 0) \
                        goto err_label; \
        } while(0)

#define pushw(base, ptr, val, err_label) \
        do { \
                __u16 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while(0)

#define pushl(base, ptr, val, err_label) \
        do { \
                __u32 __val = val; \
                ptr--; \
                if (put_user(val_byte(__val, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr--; \
                if (put_user(val_byte(__val, 0), base + ptr) < 0) \
                        goto err_label; \
        } while(0)

#define popb(base, ptr, err_label) \
        ({ \
                __u8 __res; \
                if (get_user(__res, base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popw(base, ptr, err_label) \
        ({ \
                __u16 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

#define popl(base, ptr, err_label) \
        ({ \
                __u32 __res; \
                if (get_user(val_byte(__res, 0), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 1), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 2), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                if (get_user(val_byte(__res, 3), base + ptr) < 0) \
                        goto err_label; \
                ptr++; \
                __res; \
        })

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
static void do_int(struct kernel_vm86_regs *regs, int i,
    unsigned char __user * ssp, unsigned short sp)
{
        unsigned long __user *intr_ptr;
        unsigned long segoffs;

        if (regs->pt.cs == BIOSSEG)
                goto cannot_handle;
        if (is_revectored(i, &KVM86->int_revectored))
                goto cannot_handle;
        if (i==0x21 && is_revectored(AH(regs),&KVM86->int21_revectored))
                goto cannot_handle;
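        /* Real-mode IVT entry i lives at linear address i*4 and holds the
           handler's offset in the low word, its segment in the high word. */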
        intr_ptr = (unsigned long __user *) (i << 2);
        if (get_user(segoffs, intr_ptr))
                goto cannot_handle;
        if ((segoffs >> 16) == BIOSSEG)
                goto cannot_handle;
        pushw(ssp, sp, get_vflags(regs), cannot_handle);
        pushw(ssp, sp, regs->pt.cs, cannot_handle);
        pushw(ssp, sp, IP(regs), cannot_handle);
        regs->pt.cs = segoffs >> 16;
        SP(regs) -= 6;
        IP(regs) = segoffs & 0xffff;
        clear_TF(regs);
        clear_IF(regs);
        clear_AC(regs);
        return;

cannot_handle:
        return_to_32bit(regs, VM86_INTx + (i << 8));
}

int handle_vm86_trap(struct kernel_vm86_regs * regs, long error_code, int trapno)
{
        if (VMPI.is_vm86pus) {
                if ( (trapno==3) || (trapno==1) )
                        return_to_32bit(regs, VM86_TRAP + (trapno << 8));
                do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
                return 0;
        }
        if (trapno != 1)
                return 1; /* we let the calling routine handle this */
        if (current->ptrace & PT_PTRACED) {
                unsigned long flags;
                spin_lock_irqsave(&current->sighand->siglock, flags);
                sigdelset(&current->blocked, SIGTRAP);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }
        send_sig(SIGTRAP, current, 1);
        current->thread.trap_no = trapno;
        current->thread.error_code = error_code;
        return 0;
}

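/*
 * Called from the general protection fault handler when a vm86 task
 * executes an IOPL-sensitive instruction: decode any prefixes, then
 * emulate pushf/popf/int/iret/cli/sti against the virtualized flags.
 */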
void handle_vm86_fault(struct kernel_vm86_regs * regs, long error_code)
{
        unsigned char opcode;
        unsigned char __user *csp;
        unsigned char __user *ssp;
        unsigned short ip, sp, orig_flags;
        int data32, pref_done;

#define CHECK_IF_IN_TRAP \
        if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
                newflags |= TF_MASK
#define VM86_FAULT_RETURN do { \
        if (VMPI.force_return_for_pic && (VEFLAGS & (IF_MASK | VIF_MASK))) \
                return_to_32bit(regs, VM86_PICRETURN); \
        if (orig_flags & TF_MASK) \
                handle_vm86_trap(regs, 0, 1); \
        return; } while (0)

        orig_flags = *(unsigned short *)&regs->pt.flags;

        csp = (unsigned char __user *) (regs->pt.cs << 4);
        ssp = (unsigned char __user *) (regs->pt.ss << 4);
        sp = SP(regs);
        ip = IP(regs);

        data32 = 0;
        pref_done = 0;
        do {
                switch (opcode = popb(csp, ip, simulate_sigsegv)) {
                        case 0x66:      /* 32-bit data */     data32=1; break;
                        case 0x67:      /* 32-bit address */  break;
                        case 0x2e:      /* CS */              break;
                        case 0x3e:      /* DS */              break;
                        case 0x26:      /* ES */              break;
                        case 0x36:      /* SS */              break;
                        case 0x65:      /* GS */              break;
                        case 0x64:      /* FS */              break;
                        case 0xf2:      /* repnz */           break;
                        case 0xf3:      /* rep */             break;
                        default: pref_done = 1;
                }
        } while (!pref_done);

        switch (opcode) {

        /* pushf */
        case 0x9c:
                if (data32) {
                        pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 4;
                } else {
                        pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
                        SP(regs) -= 2;
                }
                IP(regs) = ip;
                VM86_FAULT_RETURN;

        /* popf */
        case 0x9d:
                {
                unsigned long newflags;
                if (data32) {
                        newflags=popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 4;
                } else {
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 2;
                }
                IP(regs) = ip;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
                } else {
                        set_vflags_short(newflags, regs);
                }
                VM86_FAULT_RETURN;
                }

        /* int xx */
        case 0xcd: {
                int intno=popb(csp, ip, simulate_sigsegv);
                IP(regs) = ip;
                if (VMPI.vm86dbg_active) {
                        if ( (1 << (intno &7)) & VMPI.vm86dbg_intxxtab[intno >> 3] )
                                return_to_32bit(regs, VM86_INTx + (intno << 8));
                }
                do_int(regs, intno, ssp, sp);
                return;
        }

        /* iret */
        case 0xcf:
                {
                unsigned long newip;
                unsigned long newcs;
                unsigned long newflags;
                if (data32) {
                        newip=popl(ssp, sp, simulate_sigsegv);
                        newcs=popl(ssp, sp, simulate_sigsegv);
                        newflags=popl(ssp, sp, simulate_sigsegv);
                        SP(regs) += 12;
                } else {
                        newip = popw(ssp, sp, simulate_sigsegv);
                        newcs = popw(ssp, sp, simulate_sigsegv);
                        newflags = popw(ssp, sp, simulate_sigsegv);
                        SP(regs) += 6;
                }
                IP(regs) = newip;
                regs->pt.cs = newcs;
                CHECK_IF_IN_TRAP;
                if (data32) {
                        set_vflags_long(newflags, regs);
                } else {
                        set_vflags_short(newflags, regs);
                }
                VM86_FAULT_RETURN;
                }

        /* cli */
        case 0xfa:
                IP(regs) = ip;
                clear_IF(regs);
                VM86_FAULT_RETURN;

        /* sti */
        /*
         * Damn. This is incorrect: the 'sti' instruction should actually
         * enable interrupts after the /next/ instruction. Not good.
         *
         * Probably needs some horsing around with the TF flag. Aiee..
         */
        case 0xfb:
                IP(regs) = ip;
                set_IF(regs);
                VM86_FAULT_RETURN;

        default:
                return_to_32bit(regs, VM86_UNKNOWN);
        }

        return;

simulate_sigsegv:
        /* FIXME: After a long discussion with Stas we finally
         *        agreed, that this is wrong. Here we should
         *        really send a SIGSEGV to the user program.
         *        But how do we create the correct context? We
         *        are inside a general protection fault handler
         *        and have just returned from a page fault handler.
         *        The correct context for the signal handler
         *        should be a mixture of the two, but how do we
         *        get the information? [KD]
         */
        return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */

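/*
 * A vm86plus task may claim a real IRQ line: the handler below masks
 * the line and optionally signals the task, which later collects the
 * pending bit (re-enabling the line) via VM86_GET_AND_RESET_IRQ.
 */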
#define VM86_IRQNAME            "vm86irq"

static struct vm86_irqs {
        struct task_struct *tsk;
        int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS ( 1 /* 0 = don't send a signal */ \
        | (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
        | (1 << SIGUNUSED) )

static irqreturn_t irq_handler(int intno, void *dev_id)
{
        int irq_bit;
        unsigned long flags;

        spin_lock_irqsave(&irqbits_lock, flags);
        irq_bit = 1 << intno;
        if ((irqbits & irq_bit) || ! vm86_irqs[intno].tsk)
                goto out;
        irqbits |= irq_bit;
        if (vm86_irqs[intno].sig)
                send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
        /*
         * IRQ will be re-enabled when user asks for the irq (whether
         * polling or as a result of the signal)
         */
        disable_irq_nosync(intno);
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_HANDLED;

out:
        spin_unlock_irqrestore(&irqbits_lock, flags);
        return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
        unsigned long flags;

        free_irq(irqnumber, NULL);
        vm86_irqs[irqnumber].tsk = NULL;

        spin_lock_irqsave(&irqbits_lock, flags);
        irqbits &= ~(1 << irqnumber);
        spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
        int i;
        for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
            if (vm86_irqs[i].tsk == task)
                free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
        int bit;
        unsigned long flags;
        int ret = 0;

        if (invalid_vm86_irq(irqnumber)) return 0;
        if (vm86_irqs[irqnumber].tsk != current) return 0;
        spin_lock_irqsave(&irqbits_lock, flags);
        bit = irqbits & (1 << irqnumber);
        irqbits &= ~bit;
        if (bit) {
                enable_irq(irqnumber);
                ret = 1;
        }

        spin_unlock_irqrestore(&irqbits_lock, flags);
        return ret;
}


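/*
 * Dispatch the vm86plus IRQ subfunctions.  For VM86_REQUEST_IRQ the
 * argument packs the signal number in bits 8-15 and the IRQ line in
 * bits 0-7; a signal of 0 means "poll only, don't signal".
 */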
static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
        int ret;
        switch (subfunction) {
                case VM86_GET_AND_RESET_IRQ: {
                        return get_and_reset_irq(irqnumber);
                }
                case VM86_GET_IRQ_BITS: {
                        return irqbits;
                }
                case VM86_REQUEST_IRQ: {
                        int sig = irqnumber >> 8;
                        int irq = irqnumber & 255;
                        if (!capable(CAP_SYS_ADMIN)) return -EPERM;
                        if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
                        if (invalid_vm86_irq(irq)) return -EPERM;
                        if (vm86_irqs[irq].tsk) return -EPERM;
                        ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
                        if (ret) return ret;
                        vm86_irqs[irq].sig = sig;
                        vm86_irqs[irq].tsk = current;
                        return irq;
                }
                case VM86_FREE_IRQ: {
                        if (invalid_vm86_irq(irqnumber)) return -EPERM;
                        if (!vm86_irqs[irqnumber].tsk) return 0;
                        if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
                        free_vm86_irq(irqnumber);
                        return 0;
                }
        }
        return -EINVAL;
}