/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

#include <asm/kmemcheck.h>
#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <linux/atomic.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/mpx.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>

asmlinkage int system_call(void);
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void conditional_sti(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void preempt_conditional_sti(struct pt_regs *regs)
{
	preempt_count_inc();
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}

static inline void preempt_conditional_cli(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
	preempt_count_dec();
}
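
/*
 * Illustrative usage note (not from the original source): the helpers
 * above re-enable or re-disable interrupts only if the interrupted
 * context had them enabled, and the preempt_ variants additionally
 * hold the preempt count across the window so the handler cannot be
 * scheduled away while on a shared (e.g. debug) stack.  A handler
 * typically pairs them, as do_int3() does below:
 *
 *	preempt_conditional_sti(regs);
 *	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
 *	preempt_conditional_cli(regs);
 */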
| 109 | |
| 110 | enum ctx_state ist_enter(struct pt_regs *regs) |
| 111 | { |
| 112 | enum ctx_state prev_state; |
| 113 | |
| 114 | if (user_mode(regs)) { |
| 115 | /* Other than that, we're just an exception. */ |
| 116 | prev_state = exception_enter(); |
| 117 | } else { |
| 118 | /* |
| 119 | * We might have interrupted pretty much anything. In |
| 120 | * fact, if we're a machine check, we can even interrupt |
| 121 | * NMI processing. We don't want in_nmi() to return true, |
| 122 | * but we need to notify RCU. |
| 123 | */ |
| 124 | rcu_nmi_enter(); |
| 125 | prev_state = CONTEXT_KERNEL; /* the value is irrelevant. */ |
| 126 | } |
| 127 | |
| 128 | /* |
| 129 | * We are atomic because we're on the IST stack (or we're on x86_32, |
| 130 | * in which case we still shouldn't schedule). |
| 131 | * |
| 132 | * This must be after exception_enter(), because exception_enter() |
| 133 | * won't do anything if in_interrupt() returns true. |
| 134 | */ |
| 135 | preempt_count_add(HARDIRQ_OFFSET); |
| 136 | |
| 137 | /* This code is a bit fragile. Test it. */ |
| 138 | rcu_lockdep_assert(rcu_is_watching(), "ist_enter didn't work"); |
| 139 | |
| 140 | return prev_state; |
| 141 | } |
| 142 | |
| 143 | void ist_exit(struct pt_regs *regs, enum ctx_state prev_state) |
| 144 | { |
| 145 | /* Must be before exception_exit. */ |
| 146 | preempt_count_sub(HARDIRQ_OFFSET); |
| 147 | |
| 148 | if (user_mode(regs)) |
| 149 | return exception_exit(prev_state); |
| 150 | else |
| 151 | rcu_nmi_exit(); |
| 152 | } |
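
/*
 * Illustrative usage note (not from the original source): the IST-stack
 * handlers in this file bracket their bodies with the pair above,
 * e.g. in do_int3() and do_debug():
 *
 *	prev_state = ist_enter(regs);
 *	...handle the exception...
 *	ist_exit(regs, prev_state);
 */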

/**
 * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 * @regs:	regs passed to the IST exception handler
 *
 * IST exception handlers normally cannot schedule.  As a special
 * exception, if the exception interrupted userspace code (i.e.
 * user_mode(regs) would return true) and the exception was not
 * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 * begins a non-atomic section within an ist_enter()/ist_exit() region.
 * Callers are responsible for enabling interrupts themselves inside
 * the non-atomic section, and callers must call ist_end_non_atomic()
 * before ist_exit().
 */
void ist_begin_non_atomic(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));

	/*
	 * Sanity check: we need to be on the normal thread stack.  This
	 * will catch asm bugs and any attempt to use ist_preempt_enable
	 * from double_fault.
	 */
	BUG_ON((unsigned long)(current_top_of_stack() -
			       current_stack_pointer()) >= THREAD_SIZE);

	preempt_count_sub(HARDIRQ_OFFSET);
}

/**
 * ist_end_non_atomic() - end a non-atomic section in an IST exception
 *
 * Ends a non-atomic section started with ist_begin_non_atomic().
 */
void ist_end_non_atomic(void)
{
	preempt_count_add(HARDIRQ_OFFSET);
}
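
/*
 * Illustrative usage note (not from the original source): a sketch of
 * the intended window, assuming the caller is an IST handler (such as
 * the machine check handler) that interrupted userspace:
 *
 *	ist_begin_non_atomic(regs);
 *	local_irq_enable();
 *	...work that may schedule...
 *	local_irq_disable();
 *	ist_end_non_atomic();
 */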

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
		return -1;
	}

	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			tsk->thread.error_code = error_code;
			tsk->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return 0;
	}

	return -1;
}

static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr,
				 siginfo_t *info)
{
	unsigned long siaddr;
	int sicode;

	switch (trapnr) {
	default:
		return SEND_SIG_PRIV;

	case X86_TRAP_DE:
		sicode = FPE_INTDIV;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_UD:
		sicode = ILL_ILLOPN;
		siaddr = uprobe_get_trap_addr(regs);
		break;
	case X86_TRAP_AC:
		sicode = BUS_ADRALN;
		siaddr = 0;
		break;
	}

	info->si_signo = signr;
	info->si_errno = 0;
	info->si_code = sicode;
	info->si_addr = (void __user *)siaddr;
	return info;
}
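
/*
 * Note (not from the original source): for trap numbers without a
 * dedicated si_code above, fill_trap_info() returns the SEND_SIG_PRIV
 * sentinel instead of filling *info; do_trap() below forwards that
 * sentinel unchanged to force_sig_info().
 */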

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, siginfo_t *info)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;
	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also do_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

#ifdef CONFIG_X86_64
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
			tsk->comm, tsk->pid, str,
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}
#endif

	force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr)
{
	enum ctx_state prev_state = exception_enter();
	siginfo_t info;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		conditional_sti(regs);
		do_trap(trapnr, signr, str, regs, error_code,
			fill_trap_info(regs, signr, trapnr, &info));
	}

	exception_exit(prev_state);
}

#define DO_ERROR(trapnr, signr, str, name)				\
dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
{									\
	do_error_trap(regs, error_code, str, trapnr, signr);		\
}

DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error)
DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)
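
/*
 * Illustrative expansion (not from the original source): each DO_ERROR()
 * line above stamps out a trivial handler.  The first one, for example,
 * expands to:
 *
 *	dotraplinkage void do_divide_error(struct pt_regs *regs,
 *					   long error_code)
 *	{
 *		do_error_trap(regs, error_code, "divide error",
 *			      X86_TRAP_DE, SIGFPE);
 *	}
 */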

#ifdef CONFIG_X86_64
/* Runs on IST stack */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, modify
	 * the stack to make it look like we just entered the #GP
	 * handler from user space, similar to bad_iret.
	 *
	 * No need for ist_enter here because we don't use RCU.
	 */
	if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *normal_regs = task_pt_regs(current);

		/* Fake a #GP(0) from userspace. */
		memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
		normal_regs->orig_ax = 0;  /* Missing (lost) #GP error code */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&normal_regs->orig_ax;

		return;
	}
#endif

	ist_enter(regs);  /* Discard prev_state because we won't return. */
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif
	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
#endif

dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	struct xregs_state *xsave_buf;
	enum ctx_state prev_state;
	struct bndcsr *bndcsr;
	siginfo_t *info;

	prev_state = exception_enter();
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		goto exit;
	conditional_sti(regs);

	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* The exception is not from Intel MPX */
		goto exit_trap;
	}

	/*
	 * We need to look at BNDSTATUS to resolve this exception.
	 * It is not directly accessible, though, so we need to
	 * do an xsave and then pull it out of the xsave buffer.
	 */
	copy_fpregs_to_fpstate(&tsk->thread.fpu);
	xsave_buf = &(tsk->thread.fpu.state.xsave);
	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	/*
	 * The error code field of the BNDSTATUS register communicates status
	 * information of a bound range exception #BR or operation involving
	 * bound directory.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has invalid entry. */
		if (mpx_handle_bd_fault(xsave_buf))
			goto exit_trap;
		break; /* Success, it was handled */
	case 1: /* Bound violation. */
		info = mpx_generate_siginfo(regs, xsave_buf);
		if (IS_ERR(info)) {
			/*
			 * We failed to decode the MPX instruction.  Act as if
			 * the exception was not caused by MPX.
			 */
			goto exit_trap;
		}
		/*
		 * Success, we decoded the instruction and retrieved
		 * an 'info' containing the address being accessed
		 * which caused the exception.  This information
		 * allows an application to possibly handle the
		 * #BR exception itself.
		 */
		do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, info);
		kfree(info);
		break;
	case 0: /* No exception caused by Intel MPX operations. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

exit:
	exception_exit(prev_state);
	return;
exit_trap:
	/*
	 * This path out is for all the cases where we could not
	 * handle the exception in some way (like allocating a
	 * table or telling userspace about it).  We will also end
	 * up here if the kernel has MPX turned off at compile
	 * time.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, NULL);
	exception_exit(prev_state);
}

dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	conditional_sti(regs);

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		goto exit;
	}

	tsk = current;
	if (!user_mode(regs)) {
		if (fixup_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die("general protection fault", regs, error_code);
		goto exit;
	}

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk),
			regs->ip, regs->sp, error_code);
		print_vma_addr(" in ", regs->ip);
		pr_cont("\n");
	}

	force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
exit:
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_general_protection);

/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first, everything else may cause a recursive crash.
	 * See note by declaration of modifying_ftrace_code in ftrace.c
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
	    ftrace_int3_handler(regs))
		return;
#endif
	if (poke_int3_handler(regs))
		return;

	prev_state = ist_enter(regs);
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();
	preempt_conditional_sti(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();
exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_int3);

#ifdef CONFIG_X86_64
/*
 * Help handler running on IST stack to switch off the IST stack if the
 * interrupted code was in user mode.  The actual stack switch is done in
 * entry_64.S
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = task_pt_regs(current);
	*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};
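
/*
 * Illustrative note (not from the original source): judging from the
 * field names, on entry to fixup_bad_iret() the stack is laid out as:
 *
 *	s->error_entry_ret	return address pushed by the call to
 *				error_entry in entry_64.S
 *	s->regs			the pt_regs captured for the failed iret
 */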

asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to task_pt_regs
	 * and we want to pretend that the exception came from the
	 * iret target.
	 */
	struct bad_iret_stack *new_stack =
		container_of(task_pt_regs(current),
			     struct bad_iret_stack, regs);

	/* Copy the IRET target to the new stack. */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
#endif

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel.  Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses.  Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space.  Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	enum ctx_state prev_state;
	int user_icebp = 0;
	unsigned long dr6;
	int si_code;

	prev_state = ist_enter(regs);

	get_debugreg(dr6, 6);

	/* Filter out all the reserved bits which are preset to 1 */
	dr6 &= ~DR6_RESERVED;
	/*
	 * If dr6 gives us no clue about the origin of this trap, it is
	 * very likely the result of an icebp/int01 trap.  The user wants
	 * a SIGTRAP for that.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Catch kmemcheck conditions first of all! */
	if ((dr6 & DR_STEP) && kmemcheck_trap(regs))
		goto exit;

	/* DR6 may or may not be cleared by the CPU */
	set_debugreg(0, 6);

	/*
	 * The processor cleared BTF, so don't mark that we need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (NMI) know that the debug stack is in use
	 * as we may switch to the interrupt stack.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved */
	preempt_conditional_sti(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
				 X86_TRAP_DB);
		preempt_conditional_cli(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/*
	 * Single-stepping through system calls: ignore any exceptions in
	 * kernel space, but re-enable TF when returning to user mode.
	 *
	 * We already checked v86 mode above, so we can check for kernel mode
	 * by just checking the CPL of CS.
	 */
	if ((dr6 & DR_STEP) && !user_mode(regs)) {
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	preempt_conditional_cli(regs);
	debug_stack_usage_dec();

exit:
	ist_exit(regs, prev_state);
}
NOKPROBE_SYMBOL(do_debug);

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	siginfo_t info;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
		return;
	conditional_sti(regs);

	if (!user_mode(regs)) {
		if (!fixup_exception(regs)) {
			task->thread.error_code = error_code;
			task->thread.trap_nr = trapnr;
			die(str, regs, error_code);
		}
		return;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;
	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_addr = (void __user *)uprobe_get_trap_addr(regs);

	info.si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions: */
	if (!info.si_code)
		return;

	force_sig_info(SIGFPE, &info, task);
}

dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_MF);
	exception_exit(prev_state);
}

dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	math_error(regs, error_code, X86_TRAP_XF);
	exception_exit(prev_state);
}

dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	conditional_sti(regs);
#if 0
	/* No need to warn about this any longer. */
	pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}

asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
{
}

asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
{
}

dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	BUG_ON(use_eager_fpu());

#ifdef CONFIG_MATH_EMULATION
	if (read_cr0() & X86_CR0_EM) {
		struct math_emu_info info = { };

		conditional_sti(regs);

		info.regs = regs;
		math_emulate(&info);
		exception_exit(prev_state);
		return;
	}
#endif
	fpu__restore(&current->thread.fpu); /* interrupts still off */
#ifdef CONFIG_X86_32
	conditional_sti(regs);
#endif
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_device_not_available);

#ifdef CONFIG_X86_32
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	siginfo_t info;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	local_irq_enable();

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_BADSTK;
	info.si_addr = NULL;
	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			&info);
	}
	exception_exit(prev_state);
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init().  Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack.  Using the original stack works well enough at this
	 * early stage.  DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_trap_gate(SYSCALL_VECTOR, &system_call);
	set_bit(SYSCALL_VECTOR, used_vectors);
#endif

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the
	 * "sidt" instruction will not leak the location of the kernel, and
	 * to defend the IDT against arbitrary memory write vulnerabilities.
	 * It will be reloaded in cpu_init().
	 */
	__set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
	idt_descr.address = fix_to_virt(FIX_RO_IDT);

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init().  However, IST works only after
	 * cpu_init() loads the TSS.  See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
}