/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)


/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in, are marked
 * __kprobes. But, there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule)
 *
 * For such cases, we now have a blacklist
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
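/*
 * Worked example (illustrative numbers, not guaranteed for every arch):
 * on x86, PAGE_SIZE == 4096, MAX_INSN_SIZE == 16 and kprobe_opcode_t is
 * one byte, so this evaluates to 4096 / (16 * 1) == 256 slots per page.
 */
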
struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page. Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}

/* Return 1 if the whole page containing the slot was collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;
	int safety;

	/* Ensure no-one is preempted while we work on the garbage slots */
	mutex_unlock(&kprobe_insn_mutex);
	safety = check_safety();
	mutex_lock(&kprobe_insn_mutex);
	if (safety != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	mutex_lock(&kprobe_insn_mutex);
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif /* __ARCH_WANT_KPROBES_INSN_SLOT */

/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		/* remove rp inst off the used list */
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* Initialize before use: recycle_rp_inst() may add entries to it. */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

static int __kprobes __register_kprobe(struct kprobe *p,
	unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!__kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod;
		calling_mod = __module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self probing modules.
		 */
		if (calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod))) {
				preempt_enable();
				return -EINVAL;
			}
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return -EINVAL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid_p;
		return -EINVAL;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p;

	if (p->mod_refcounted) {
		/*
		 * Since we've already incremented refcount,
		 * we don't need to disable preemption.
		 */
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (list_empty(&p->list) || list_is_singular(&p->list)) {
		if (!list_empty(&p->list)) {
			/* "p" is the last child of an aggr_kprobe */
			old_p = list_entry(p->list.next, struct kprobe, list);
			list_del(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	}
}

static int __register_kprobes(struct kprobe **kps, int num,
	unsigned long called_from)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kprobe(kps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}

/*
 * Registration and unregistration functions for kprobe.
 */
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobes(&p, 1,
				  (unsigned long)__builtin_return_address(0));
}

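/*
 * Example (illustrative sketch, not part of this file): the usual pattern
 * for register_kprobe() from a module.  The probed symbol and handler
 * names below are hypothetical; see Documentation/kprobes.txt for the
 * canonical examples.
 *
 *	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe hit at %p\n", kp->addr);
 *		return 0;	(0 means: go on to single-step the insn)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	static int __init my_module_init(void)
 *	{
 *		return register_kprobe(&my_kp);
 *	}
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		unregister_kprobe(&my_kp);
 *	}
 */
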
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	return __register_kprobes(kps, num,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

static int __register_jprobes(struct jprobe **jps, int num,
	unsigned long called_from)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = __register_kprobe(&jp->kp, called_from);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	return __register_jprobes(&jp, 1,
				  (unsigned long)__builtin_return_address(0));
}

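/*
 * Example (illustrative sketch, not part of this file): a jprobe handler
 * mirrors the prototype of the probed function, so the arguments arrive
 * by name, and it must end with jprobe_return().  The names below are
 * hypothetical and assume the 2.6.2x do_fork() prototype.
 *
 *	static long my_jdo_fork(unsigned long clone_flags,
 *				unsigned long stack_start,
 *				struct pt_regs *regs,
 *				unsigned long stack_size,
 *				int __user *parent_tidptr,
 *				int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	(mandatory; never return normally)
 *		return 0;		(never reached)
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry	= my_jdo_fork,
 *		.kp	= { .symbol_name = "do_fork" },
 *	};
 *
 *	register_jprobe(&my_jprobe);
 */
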
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	return __register_jprobes(jps, num,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fires */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			/*
			 * The lock was dropped above; re-take it to put
			 * the unused instance back on the free list.
			 */
			spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

static int __kprobes __register_kretprobe(struct kretprobe *rp,
					  unsigned long called_from)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = __register_kprobe(&rp->kp, called_from);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}

static int __register_kretprobes(struct kretprobe **rps, int num,
	unsigned long called_from)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kretprobe(rps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return __register_kretprobes(&rp, 1,
				     (unsigned long)__builtin_return_address(0));
}

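/*
 * Example (illustrative sketch, not part of this file): a kretprobe that
 * reports a function's return value.  The names are hypothetical;
 * regs_return_value() is the arch helper for the return register.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "returned %ld\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= my_ret_handler,
 *		.kp		= { .symbol_name = "do_fork" },
 *		.maxactive	= 20,	(bound on concurrent instances)
 *	};
 *
 *	register_kretprobe(&my_kretprobe);
 */
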
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return __register_kretprobes(rps, num,
				     (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	kprobes_initialized = (err == 0);

	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
			sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do here */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}

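/*
 * Usage note (illustrative, assuming debugfs is mounted at
 * /sys/kernel/debug): all kprobes can be disarmed and re-armed from
 * user space through the "enabled" file created below, e.g.
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 *	echo 1 > /sys/kernel/debug/kprobes/enabled
 */
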
static struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

);
1356 #endif /* CONFIG_DEBUG_FS */
1358 module_init(init_kprobes
);
1360 EXPORT_SYMBOL_GPL(register_kprobe
);
1361 EXPORT_SYMBOL_GPL(unregister_kprobe
);
1362 EXPORT_SYMBOL_GPL(register_kprobes
);
1363 EXPORT_SYMBOL_GPL(unregister_kprobes
);
1364 EXPORT_SYMBOL_GPL(register_jprobe
);
1365 EXPORT_SYMBOL_GPL(unregister_jprobe
);
1366 EXPORT_SYMBOL_GPL(register_jprobes
);
1367 EXPORT_SYMBOL_GPL(unregister_jprobes
);
1368 EXPORT_SYMBOL_GPL(jprobe_return
);
1369 EXPORT_SYMBOL_GPL(register_kretprobe
);
1370 EXPORT_SYMBOL_GPL(unregister_kretprobe
);
1371 EXPORT_SYMBOL_GPL(register_kretprobes
);
1372 EXPORT_SYMBOL_GPL(unregister_kretprobes
);