/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task, but
			      doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
/* insert a jmp code */
static __always_inline void set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		long raddr;
	} __attribute__((packed)) *jop;
	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (long)(to) - ((long)(from) + 5);
	jop->op = RELATIVEJUMP_INSTRUCTION;
}
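
/*
 * Worked example (illustrative, not from the original source): a boosted
 * probe whose jmp is written at 0xc0100000 and must land at 0xc0100020
 * stores
 *
 *	raddr = 0xc0100020 - (0xc0100000 + 5) = 0x1b
 *
 * i.e. the rel32 displacement is measured from the end of the 5-byte
 * jmp instruction (1 opcode byte + 4 displacement bytes).
 */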
/*
 * returns non-zero if opcodes can be boosted.
 */
static __always_inline int can_boost(kprobe_opcode_t *opcodes)
{
#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 */
	static const unsigned long twobyte_is_boostable[256 / 32] = {
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
		/*      -------------------------------         */
		W(0x00, 0,0,1,1,0,0,1,0,1,1,0,0,0,0,0,0)| /* 00 */
		W(0x10, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 10 */
		W(0x20, 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0)| /* 20 */
		W(0x30, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */
		W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */
		W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 50 */
		W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1)| /* 60 */
		W(0x70, 0,0,0,0,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */
		W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 80 */
		W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 90 */
		W(0xa0, 1,1,0,1,1,1,0,0,1,1,0,1,1,1,0,1)| /* a0 */
		W(0xb0, 1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1), /* b0 */
		W(0xc0, 1,1,0,0,0,0,0,0,1,1,1,1,1,1,1,1)| /* c0 */
		W(0xd0, 0,1,1,1,0,1,0,0,1,1,0,1,1,1,0,1), /* d0 */
		W(0xe0, 0,1,1,0,0,1,0,0,1,1,0,1,1,1,0,1)| /* e0 */
		W(0xf0, 0,1,1,1,0,1,0,0,1,1,1,0,1,1,1,0)  /* f0 */
		/*      -------------------------------         */
		/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	};
#undef W
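	/*
	 * Lookup example (illustrative): for the two-byte insn 0x0f 0x31
	 * (rdtsc), test_bit(0x31, ...) selects row 0x30, position b1 in
	 * the table above, which is 1 -- rdtsc can be boosted. A
	 * conditional jump such as 0x0f 0x84 (jz rel32) hits row 0x80,
	 * position b4, which is 0 -- not boostable.
	 */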
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;
retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes, twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags can be boosted */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* can't boost CS override and call */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
/*
 * returns non-zero if opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t opcode)
{
	switch (opcode) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}
	return 0;
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* insn: must be on special executable page on i386. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;

	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	p->opcode = *p->addr;
	if (can_boost(p->addr)) {
		p->ainsn.boostable = 0;
	} else {
		p->ainsn.boostable = -1;
	}
	return 0;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	mutex_lock(&kprobe_mutex);
	free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
	mutex_unlock(&kprobe_mutex);
}
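
/*
 * Usage sketch (illustrative, not part of this file): how a module would
 * exercise the arch hooks above through the generic kprobes API. The
 * probed symbol "do_fork" is only an example; any non-blacklisted kernel
 * text address works. Field and register names match this i386/2.6-era
 * tree.
 */
#if 0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs from kprobe_handler() before the probed instruction. */
	printk(KERN_INFO "kprobe hit at eip=0x%lx\n", regs->eip);
	return 0;	/* let kprobes single-step (or boost) the insn */
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);	/* arch_prepare + arch_arm */
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);		/* arch_disarm + arch_remove */
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");
#endif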
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
	kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
	kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags
		= (regs->eflags & (TF_MASK | IF_MASK));
	if (is_IF_modifier(p->opcode))
		kcb->kprobe_saved_eflags &= ~IF_MASK;
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->eflags |= TF_MASK;
	regs->eflags &= ~IF_MASK;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->eip = (unsigned long)p->addr;
	else
		regs->eip = (unsigned long)p->ainsn.insn;
}
/* Called with kretprobe_lock held */
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = (unsigned long *)&regs->esp;

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
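
/*
 * Usage sketch (illustrative, not part of this file): a return probe that
 * reports the probed function's return value. On i386 the integer return
 * value is in %eax; "do_fork" is again just an example symbol.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* Runs from trampoline_handler() with the fixed-up pt_regs. */
	printk(KERN_INFO "returned %ld to %p\n",
	       (long)regs->eax, ri->ret_addr);
	return 0;
}

static struct kretprobe rp = {
	.handler	= ret_handler,
	.maxactive	= 20,	/* instances for concurrent activations */
};

static int __init rp_init(void)
{
	rp.kp.symbol_name = "do_fork";
	return register_kretprobe(&rp);
}

static void __exit rp_exit(void)
{
	unregister_kretprobe(&rp);
}

module_init(rp_init);
module_exit(rp_exit);
MODULE_LICENSE("GPL");
#endif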
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	kprobe_opcode_t *addr;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
				*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
				regs->eflags &= ~TF_MASK;
				regs->eflags |= kcb->kprobe_saved_eflags;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no further
				 * handling of this interrupt is appropriate.
				 */
				regs->eip -= sizeof(kprobe_opcode_t);
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 * Back up over the (now missing) int3 and run
			 * the original instruction.
			 */
			regs->eip -= sizeof(kprobe_opcode_t);
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	set_current_kprobe(p, regs, kcb);
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		reset_current_kprobe();
		regs->eip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return 1;
	}
#endif
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
/*
 * For function-return probes, init_kprobes() establishes a probepoint
 * here. When a retprobed function returns, this probe is hit and
 * trampoline_handler() runs, calling the kretprobe's handler.
 */
void __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile ( ".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
			"	pushf\n"
			/* skip cs, eip, orig_eax */
			"	subl $12, %esp\n"
			"	pushl %fs\n"
			"	pushl %es\n"
			"	pushl %ds\n"
			"	pushl %eax\n"
			"	pushl %ebp\n"
			"	pushl %edi\n"
			"	pushl %esi\n"
			"	pushl %edx\n"
			"	pushl %ecx\n"
			"	pushl %ebx\n"
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* move eflags to cs */
			"	movl 52(%esp), %edx\n"
			"	movl %edx, 48(%esp)\n"
			/* save true return address on eflags */
			"	movl %eax, 52(%esp)\n"
			"	popl %ebx\n"
			"	popl %ecx\n"
			"	popl %edx\n"
			"	popl %esi\n"
			"	popl %edi\n"
			"	popl %ebp\n"
			"	popl %eax\n"
			/* skip eip, orig_eax, es, ds, fs */
			"	addl $20, %esp\n"
			"	popf\n"
			"	ret\n");
}
/*
 * Called from kretprobe_trampoline
 */
fastcall void *__kprobes trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(current);
	/* fixup registers */
	regs->xcs = __KERNEL_CS | get_kernel_rpl();
	regs->eip = trampoline_address;
	regs->orig_eax = 0xffffffff;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler) {
			__get_cpu_var(current_kprobe) = &ri->rp->kp;
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->rp->handler(ri, regs);
			__get_cpu_var(current_kprobe) = NULL;
		}

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new eip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed eflags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * This function also checks instruction size for preparing direct execution.
 */
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = (unsigned long *)&regs->esp;
	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;

	regs->eflags &= ~TF_MASK;
	switch (p->ainsn.insn[0]) {
	case 0x9c:		/* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= kcb->kprobe_old_eflags;
		break;
	case 0xc2:		/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:		/* jmp absolute -- eip is correct */
		/* eip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:		/* call relative - fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0x9a:		/* call absolute -- fix return addr, eip is correct */
		*tos = orig_eip + (*tos - copy_eip);
		goto no_change;
	case 0xff:
		if ((p->ainsn.insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; eip is correct.
			 * But this is not boostable
			 */
			*tos = orig_eip + (*tos - copy_eip);
			goto no_change;
		} else if (((p->ainsn.insn[1] & 0x31) == 0x20) ||	/* jmp near, absolute indirect */
			   ((p->ainsn.insn[1] & 0x31) == 0x21)) {	/* jmp far, absolute indirect */
			/* eip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->eip > copy_eip) &&
		    (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) {
			/*
			 * This instruction can be executed directly if it
			 * jumps back to the correct address.
			 */
			set_jmp_op((void *)regs->eip,
				   (void *)orig_eip + (regs->eip - copy_eip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->eip = orig_eip + (regs->eip - copy_eip);

no_change:
	return;
}
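
/*
 * Worked example (illustrative): assume the probed "call rel32" lives at
 * orig_eip = 0xc0100000 and its copy at copy_eip = 0xd0200000. After
 * single-stepping the copy, the pushed return address is copy_eip + 5,
 * so the fixup above rewrites it to
 *
 *	*tos = orig_eip + ((copy_eip + 5) - copy_eip) = orig_eip + 5
 *
 * and the final statement relocates eip back into the original function
 * the same way.
 */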
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs, kcb);
	regs->eflags |= kcb->kprobe_saved_eflags;
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
	if (raw_irqs_disabled_flags(regs->eflags))
		trace_hardirqs_off();
	else
		trace_hardirqs_on();
#endif
	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, eflags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->eflags & TF_MASK)
		return 0;

	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the eip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->eip = (unsigned long)cur->addr;
		regs->eflags |= kcb->kprobe_old_eflags;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it; let
		 * do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_GPF:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_esp = &regs->esp;
	addr = (unsigned long)(kcb->jprobe_saved_esp);

	/*
	 * TBD: As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
			MIN_STACK_SIZE(addr));
	regs->eflags &= ~IF_MASK;
	trace_hardirqs_off();
	regs->eip = (unsigned long)(jp->entry);
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile ("       xchgl   %%ebx,%%esp     \n"
		      "       int3			\n"
		      "       .globl jprobe_return_end	\n"
		      "       jprobe_return_end:	\n"
		      "       nop			\n"::"b"
		      (kcb->jprobe_saved_esp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->eip - 1);
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp);
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
		if (&regs->esp != kcb->jprobe_saved_esp) {
			struct pt_regs *saved_regs =
			    container_of(kcb->jprobe_saved_esp,
					    struct pt_regs, esp);
			printk("current esp %p does not match saved esp %p\n",
			       &regs->esp, kcb->jprobe_saved_esp);
			printk("Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk("Current registers\n");
			show_registers(regs);
			BUG();
		}
		*regs = kcb->jprobe_saved_regs;
		memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
		       MIN_STACK_SIZE(stack_addr));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
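
/*
 * Usage sketch (illustrative, not part of this file): a jprobe handler
 * shares the probed function's prototype, inspects its arguments, and
 * must finish with jprobe_return(). The do_fork() prototype below matches
 * this kernel era but is still an example, not a requirement of the API.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
		     struct pt_regs *regs, unsigned long stack_size,
		     int __user *parent_tidptr, int __user *child_tidptr)
{
	printk(KERN_INFO "do_fork: clone_flags=0x%lx\n", clone_flags);
	jprobe_return();	/* int3 back into longjmp_break_handler() */
	return 0;		/* never reached */
}

static struct jprobe jp = {
	.entry = JPROBE_ENTRY(jdo_fork),
};

static int __init jp_init(void)
{
	jp.kp.symbol_name = "do_fork";
	return register_jprobe(&jp);
}

static void __exit jp_exit(void)
{
	unregister_jprobe(&jp);
}

module_init(jp_init);
module_exit(jp_exit);
MODULE_LICENSE("GPL");
#endif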
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}