/*
 * kprobe based kernel tracer
 *
 * Created by Masami Hiramatsu <mhiramat@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/ptrace.h>
#include <linux/perf_event.h>

#include "trace.h"
#include "trace_output.h"
#define MAX_TRACE_ARGS 128
#define MAX_ARGSTR_LEN 63
#define MAX_EVENT_NAME_LEN 64
#define KPROBE_EVENT_SYSTEM "kprobes"
/* currently, trace_kprobe only supports X86. */

struct fetch_func {
	unsigned long (*func)(struct pt_regs *, void *);
	void *data;
};

static __kprobes unsigned long call_fetch(struct fetch_func *f,
					  struct pt_regs *regs)
{
	return f->func(regs, f->data);
}

/* fetch handlers */
static __kprobes unsigned long fetch_register(struct pt_regs *regs,
					      void *offset)
{
	return regs_get_register(regs, (unsigned int)((unsigned long)offset));
}

static __kprobes unsigned long fetch_stack(struct pt_regs *regs,
					   void *num)
{
	return regs_get_kernel_stack_nth(regs,
					 (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr)
{
	unsigned long retval;

	if (probe_kernel_address(addr, retval))
		return 0;
	return retval;
}

static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num)
{
	return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num));
}

static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs,
					      void *dummy)
{
	return regs_return_value(regs);
}

static __kprobes unsigned long fetch_ip(struct pt_regs *regs, void *dummy)
{
	return instruction_pointer(regs);
}

static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs,
						   void *dummy)
{
	return kernel_stack_pointer(regs);
}
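
/*
 * Illustration (not part of the original code): every fetch method pairs a
 * handler above with one opaque parameter, so a probe argument is just a
 * (func, data) pair invoked through call_fetch().  A sketch of fetching
 * function argument 0 at probe-hit time, assuming a valid pt_regs:
 *
 *	struct fetch_func f = {
 *		.func	= fetch_argument,
 *		.data	= (void *)0,	// argument index 0
 *	};
 *	unsigned long val = call_fetch(&f, regs);
 */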
/* Memory fetching by symbol */
struct symbol_cache {
	char *symbol;
	long offset;
	unsigned long addr;
};

static unsigned long update_symbol_cache(struct symbol_cache *sc)
{
	sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol);
	if (sc->addr)
		sc->addr += sc->offset;
	return sc->addr;
}

static void free_symbol_cache(struct symbol_cache *sc)
{
	kfree(sc->symbol);
	kfree(sc);
}

static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset)
{
	struct symbol_cache *sc;

	if (!sym || strlen(sym) == 0)
		return NULL;
	sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->symbol = kstrdup(sym, GFP_KERNEL);
	if (!sc->symbol) {
		kfree(sc);
		return NULL;
	}
	sc->offset = offset;

	update_symbol_cache(sc);
	return sc;
}

static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data)
{
	struct symbol_cache *sc = data;

	if (sc->addr)
		return fetch_memory(regs, (void *)sc->addr);
	else
		return 0;
}
/* Special indirect memory access interface */
struct indirect_fetch_data {
	struct fetch_func orig;
	long offset;
};

static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data)
{
	struct indirect_fetch_data *ind = data;
	unsigned long addr;

	addr = call_fetch(&ind->orig, regs);
	if (addr) {
		addr += ind->offset;
		return fetch_memory(regs, (void *)addr);
	} else
		return 0;
}

static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data)
{
	if (data->orig.func == fetch_indirect)
		free_indirect_fetch_data(data->orig.data);
	else if (data->orig.func == fetch_symbol)
		free_symbol_cache(data->orig.data);
	kfree(data);
}
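
/*
 * Illustrative sketch (an assumption, not from the original source): an
 * argument string such as "+4(%ax)" parses into one indirect_fetch_data
 * whose orig fetches the register and whose offset is 4:
 *
 *	id->offset	= 4;
 *	id->orig.func	= fetch_register;	// for "%ax"
 *	ff->func	= fetch_indirect;
 *	ff->data	= id;
 *
 * fetch_indirect() then dereferences (reg value + 4).  Nested forms such
 * as "+8(+4(%ax))" simply chain indirect_fetch_data structures, which is
 * why free_indirect_fetch_data() above recurses.
 */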
/**
 * Kprobe tracer core functions
 */

struct probe_arg {
	struct fetch_func	fetch;
	const char		*name;
};

/* Flags for trace_probe */
#define TP_FLAG_TRACE	1
#define TP_FLAG_PROFILE	2

struct trace_probe {
	struct list_head	list;
	struct kretprobe	rp;	/* Use rp.kp for kprobe use */
	unsigned long		nhit;
	unsigned int		flags;	/* For TP_FLAG_* */
	const char		*symbol;	/* symbol name */
	struct ftrace_event_call	call;
	struct trace_event		event;
	unsigned int		nr_args;
	struct probe_arg	args[];
};

#define SIZEOF_TRACE_PROBE(n)			\
	(offsetof(struct trace_probe, args) +	\
	(sizeof(struct probe_arg) * (n)))
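
/*
 * Worked example of the flexible-array sizing above (illustrative
 * arithmetic): for a probe with two arguments,
 *
 *	SIZEOF_TRACE_PROBE(2) == offsetof(struct trace_probe, args)
 *				 + 2 * sizeof(struct probe_arg)
 *
 * so the args[] entries live in the same kzalloc() block as the
 * trace_probe itself and need no separate allocation.
 */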
static __kprobes int probe_is_return(struct trace_probe *tp)
{
	return tp->rp.handler != NULL;
}

static __kprobes const char *probe_symbol(struct trace_probe *tp)
{
	return tp->symbol ? tp->symbol : "unknown";
}
static int probe_arg_string(char *buf, size_t n, struct fetch_func *ff)
{
	int ret = -EINVAL;

	if (ff->func == fetch_argument)
		ret = snprintf(buf, n, "a%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_register) {
		const char *name;
		name = regs_query_register_name((unsigned int)((long)ff->data));
		ret = snprintf(buf, n, "%%%s", name);
	} else if (ff->func == fetch_stack)
		ret = snprintf(buf, n, "s%lu", (unsigned long)ff->data);
	else if (ff->func == fetch_memory)
		ret = snprintf(buf, n, "@0x%p", ff->data);
	else if (ff->func == fetch_symbol) {
		struct symbol_cache *sc = ff->data;
		ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset);
	} else if (ff->func == fetch_retvalue)
		ret = snprintf(buf, n, "rv");
	else if (ff->func == fetch_ip)
		ret = snprintf(buf, n, "ra");
	else if (ff->func == fetch_stack_address)
		ret = snprintf(buf, n, "sa");
	else if (ff->func == fetch_indirect) {
		struct indirect_fetch_data *id = ff->data;
		size_t l = 0;

		ret = snprintf(buf, n, "%+ld(", id->offset);
		if (ret >= n)
			goto end;
		l += ret;
		ret = probe_arg_string(buf + l, n - l, &id->orig);
		if (ret < 0)
			goto end;
		l += ret;
		ret = snprintf(buf + l, n - l, ")");
		ret += l;
	}
end:
	if (ret >= n)
		return -ENOSPC;
	return ret;
}
static int register_probe_event(struct trace_probe *tp);
static void unregister_probe_event(struct trace_probe *tp);

static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);

static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
				struct pt_regs *regs);
/*
 * Allocate new trace_probe and initialize it (including kprobes).
 */
static struct trace_probe *alloc_trace_probe(const char *group,
					     const char *event,
					     void *addr,
					     const char *symbol,
					     unsigned long offs,
					     int nargs, int is_return)
{
	struct trace_probe *tp;

	tp = kzalloc(SIZEOF_TRACE_PROBE(nargs), GFP_KERNEL);
	if (!tp)
		return ERR_PTR(-ENOMEM);

	if (symbol) {
		tp->symbol = kstrdup(symbol, GFP_KERNEL);
		if (!tp->symbol)
			goto error;
		tp->rp.kp.symbol_name = tp->symbol;
		tp->rp.kp.offset = offs;
	} else
		tp->rp.kp.addr = addr;

	if (is_return)
		tp->rp.handler = kretprobe_dispatcher;
	else
		tp->rp.kp.pre_handler = kprobe_dispatcher;

	if (!event)
		goto error;
	tp->call.name = kstrdup(event, GFP_KERNEL);
	if (!tp->call.name)
		goto error;

	if (!group)
		goto error;
	tp->call.system = kstrdup(group, GFP_KERNEL);
	if (!tp->call.system)
		goto error;

	INIT_LIST_HEAD(&tp->list);
	return tp;
error:
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
	return ERR_PTR(-ENOMEM);
}
static void free_probe_arg(struct probe_arg *arg)
{
	if (arg->fetch.func == fetch_symbol)
		free_symbol_cache(arg->fetch.data);
	else if (arg->fetch.func == fetch_indirect)
		free_indirect_fetch_data(arg->fetch.data);
	kfree(arg->name);
}

static void free_trace_probe(struct trace_probe *tp)
{
	int i;

	for (i = 0; i < tp->nr_args; i++)
		free_probe_arg(&tp->args[i]);

	kfree(tp->call.system);
	kfree(tp->call.name);
	kfree(tp->symbol);
	kfree(tp);
}
static struct trace_probe *find_probe_event(const char *event)
{
	struct trace_probe *tp;

	list_for_each_entry(tp, &probe_list, list)
		if (!strcmp(tp->call.name, event))
			return tp;
	return NULL;
}
/* Unregister a trace_probe and probe_event: call with locking probe_lock */
static void unregister_trace_probe(struct trace_probe *tp)
{
	if (probe_is_return(tp))
		unregister_kretprobe(&tp->rp);
	else
		unregister_kprobe(&tp->rp.kp);
	list_del(&tp->list);
	unregister_probe_event(tp);
}
/* Register a trace_probe and probe_event */
static int register_trace_probe(struct trace_probe *tp)
{
	struct trace_probe *old_tp;
	int ret;

	mutex_lock(&probe_lock);

	/* register as an event */
	old_tp = find_probe_event(tp->call.name);
	if (old_tp) {
		/* delete old event */
		unregister_trace_probe(old_tp);
		free_trace_probe(old_tp);
	}
	ret = register_probe_event(tp);
	if (ret) {
		pr_warning("Failed to register probe event(%d)\n", ret);
		goto end;
	}

	tp->rp.kp.flags |= KPROBE_FLAG_DISABLED;
	if (probe_is_return(tp))
		ret = register_kretprobe(&tp->rp);
	else
		ret = register_kprobe(&tp->rp.kp);

	if (ret) {
		pr_warning("Could not insert probe(%d)\n", ret);
		if (ret == -EILSEQ) {
			pr_warning("Probing address(0x%p) is not an "
				   "instruction boundary.\n",
				   tp->rp.kp.addr);
			ret = -EINVAL;
		}
		unregister_probe_event(tp);
	} else
		list_add_tail(&tp->list, &probe_list);
end:
	mutex_unlock(&probe_lock);
	return ret;
}
/* Split symbol and offset. */
static int split_symbol_offset(char *symbol, unsigned long *offset)
{
	char *tmp;
	int ret;

	if (!offset)
		return -EINVAL;

	tmp = strchr(symbol, '+');
	if (tmp) {
		/* skip sign because strict_strtol doesn't accept '+' */
		ret = strict_strtoul(tmp + 1, 0, offset);
		if (ret)
			return ret;
		*tmp = '\0';
	} else
		*offset = 0;
	return 0;
}
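
/*
 * For example (illustrative, symbol name assumed): given the string
 * "schedule+0x10", split_symbol_offset() stores 0x10 in *offset and
 * truncates the buffer in place to "schedule"; a bare "schedule" leaves
 * *offset == 0.
 */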
#define PARAM_MAX_ARGS 16
#define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
static int parse_probe_arg(char *arg, struct fetch_func *ff, int is_return)
{
	int ret = 0;
	unsigned long param;
	long offset;
	char *tmp;

	switch (arg[0]) {
	case 'a':	/* argument */
		ret = strict_strtoul(arg + 1, 10, &param);
		if (ret || param > PARAM_MAX_ARGS)
			ret = -EINVAL;
		else {
			ff->func = fetch_argument;
			ff->data = (void *)param;
		}
		break;
	case 'r':	/* retval or retaddr */
		if (is_return && arg[1] == 'v') {
			ff->func = fetch_retvalue;
			ff->data = NULL;
		} else if (is_return && arg[1] == 'a') {
			ff->func = fetch_ip;
			ff->data = NULL;
		} else
			ret = -EINVAL;
		break;
	case '%':	/* named register */
		ret = regs_query_register_offset(arg + 1);
		if (ret >= 0) {
			ff->func = fetch_register;
			ff->data = (void *)(unsigned long)ret;
			ret = 0;
		}
		break;
	case 's':	/* stack */
		if (arg[1] == 'a') {
			ff->func = fetch_stack_address;
			ff->data = NULL;
		} else {
			ret = strict_strtoul(arg + 1, 10, &param);
			if (ret || param > PARAM_MAX_STACK)
				ret = -EINVAL;
			else {
				ff->func = fetch_stack;
				ff->data = (void *)param;
			}
		}
		break;
	case '@':	/* memory or symbol */
		if (isdigit(arg[1])) {
			ret = strict_strtoul(arg + 1, 0, &param);
			if (ret)
				break;
			ff->func = fetch_memory;
			ff->data = (void *)param;
		} else {
			ret = split_symbol_offset(arg + 1, &offset);
			if (ret)
				break;
			ff->data = alloc_symbol_cache(arg + 1, offset);
			if (ff->data)
				ff->func = fetch_symbol;
			else
				ret = -EINVAL;
		}
		break;
	case '+':	/* indirect memory */
	case '-':
		tmp = strchr(arg, '(');
		if (!tmp) {
			ret = -EINVAL;
			break;
		}
		*tmp = '\0';
		ret = strict_strtol(arg + 1, 0, &offset);
		if (ret)
			break;
		if (arg[0] == '-')
			offset = -offset;
		arg = tmp + 1;
		tmp = strrchr(arg, ')');
		if (tmp) {
			struct indirect_fetch_data *id;

			*tmp = '\0';
			id = kzalloc(sizeof(struct indirect_fetch_data),
				     GFP_KERNEL);
			if (!id)
				return -ENOMEM;
			id->offset = offset;
			ret = parse_probe_arg(arg, &id->orig, is_return);
			if (ret)
				kfree(id);
			else {
				ff->func = fetch_indirect;
				ff->data = (void *)id;
			}
		} else
			ret = -EINVAL;
		break;
	default:
		/* TODO: support custom handler */
		ret = -EINVAL;
	}
	return ret;
}
static int create_trace_probe(int argc, char **argv)
{
	/*
	 * Argument syntax:
	 *  - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS]
	 *  - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS]
	 * Fetch args:
	 *  aN		: fetch Nth of function argument. (N:0-)
	 *  rv		: fetch return value
	 *  ra		: fetch return address
	 *  sa		: fetch stack address
	 *  sN		: fetch Nth of stack (N:0-)
	 *  @ADDR	: fetch memory at ADDR (ADDR should be in kernel)
	 *  @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol)
	 *  %REG	: fetch register REG
	 * Indirect memory fetch:
	 *  +|-offs(ARG) : fetch memory at ARG +|- offs address.
	 * Alias name of args:
	 *  NAME=FETCHARG : set NAME as alias of FETCHARG.
	 */
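	/*
	 * For illustration only (event and symbol names are assumptions):
	 * commands like these reach this parser through the debugfs file
	 * created in init_kprobe_trace() below:
	 *
	 *	echo 'p:myprobe do_sys_open dfd=a0 filename=a1' > \
	 *		/sys/kernel/debug/tracing/kprobe_events
	 *	echo 'r:myretprobe do_sys_open rv' >> \
	 *		/sys/kernel/debug/tracing/kprobe_events
	 */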
	struct trace_probe *tp;
	int i, ret = 0;
	int is_return = 0;
	char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL;
	unsigned long offset = 0;
	void *addr = NULL;
	char buf[MAX_EVENT_NAME_LEN];

	if (argc < 2)
		return -EINVAL;

	if (argv[0][0] == 'p')
		is_return = 0;
	else if (argv[0][0] == 'r')
		is_return = 1;
	else
		return -EINVAL;

	if (argv[0][1] == ':') {
		event = &argv[0][2];
		if (strchr(event, '/')) {
			group = event;
			event = strchr(group, '/') + 1;
			event[-1] = '\0';
			if (strlen(group) == 0) {
				pr_info("Group name is not specified\n");
				return -EINVAL;
			}
		}
		if (strlen(event) == 0) {
			pr_info("Event name is not specified\n");
			return -EINVAL;
		}
	}

	if (isdigit(argv[1][0])) {
		if (is_return)
			return -EINVAL;
		/* an address specified */
		ret = strict_strtoul(argv[1], 0, (unsigned long *)&addr);
		if (ret)
			return ret;
	} else {
		/* a symbol specified */
		symbol = argv[1];
		/* TODO: support .init module functions */
		ret = split_symbol_offset(symbol, &offset);
		if (ret)
			return ret;
		if (offset && is_return)
			return -EINVAL;
	}
	argc -= 2; argv += 2;

	/* setup a probe */
	if (!group)
		group = KPROBE_EVENT_SYSTEM;
	if (!event) {
		/* Make a new event name */
		if (symbol)
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@%s%+ld",
				 is_return ? 'r' : 'p', symbol, offset);
		else
			snprintf(buf, MAX_EVENT_NAME_LEN, "%c@0x%p",
				 is_return ? 'r' : 'p', addr);
		event = buf;
	}
	tp = alloc_trace_probe(group, event, addr, symbol, offset, argc,
			       is_return);
	if (IS_ERR(tp))
		return PTR_ERR(tp);

	/* parse arguments */
	ret = 0;
	for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
		/* Parse argument name */
		arg = strchr(argv[i], '=');
		if (arg)
			*arg++ = '\0';
		else
			arg = argv[i];
		tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
		if (!tp->args[i].name) {
			ret = -ENOMEM;
			goto error;
		}

		/* Parse fetch argument */
		if (strlen(arg) > MAX_ARGSTR_LEN) {
			pr_info("Argument%d(%s) is too long.\n", i, arg);
			ret = -ENOSPC;
			goto error;
		}
		ret = parse_probe_arg(arg, &tp->args[i].fetch, is_return);
		if (ret)
			goto error;
	}
	tp->nr_args = i;

	ret = register_trace_probe(tp);
	if (ret)
		goto error;
	return 0;

error:
	free_trace_probe(tp);
	return ret;
}
static void cleanup_all_probes(void)
{
	struct trace_probe *tp;

	mutex_lock(&probe_lock);
	/* TODO: Use batch unregistration */
	while (!list_empty(&probe_list)) {
		tp = list_entry(probe_list.next, struct trace_probe, list);
		unregister_trace_probe(tp);
		free_trace_probe(tp);
	}
	mutex_unlock(&probe_lock);
}
/* Probes listing interfaces */
static void *probes_seq_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&probe_lock);
	return seq_list_start(&probe_list, *pos);
}

static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	return seq_list_next(v, &probe_list, pos);
}

static void probes_seq_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&probe_lock);
}

static int probes_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;
	int i, ret;
	char buf[MAX_ARGSTR_LEN + 1];

	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
	seq_printf(m, ":%s", tp->call.name);

	if (tp->symbol)
		seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset);
	else
		seq_printf(m, " 0x%p", tp->rp.kp.addr);

	for (i = 0; i < tp->nr_args; i++) {
		ret = probe_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i].fetch);
		if (ret < 0) {
			pr_warning("Argument%d decoding error(%d).\n", i, ret);
			return 0;
		}
		seq_printf(m, " %s=%s", tp->args[i].name, buf);
	}
	seq_printf(m, "\n");
	return 0;
}
static const struct seq_operations probes_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_seq_show
};

static int probes_open(struct inode *inode, struct file *file)
{
	if ((file->f_mode & FMODE_WRITE) &&
	    (file->f_flags & O_TRUNC))
		cleanup_all_probes();

	return seq_open(file, &probes_seq_op);
}
static int command_trace_probe(const char *buf)
{
	char **argv;
	int argc = 0, ret = 0;

	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = create_trace_probe(argc, argv);

	argv_free(argv);
	return ret;
}
#define WRITE_BUFSIZE 128

static ssize_t probes_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	char *kbuf, *tmp;
	int ret;
	size_t done;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	ret = done = 0;
	while (done < count) {
		size = count - done;
		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;
		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		tmp = strchr(kbuf, '\n');

		if (tmp) {
			*tmp = '\0';
			size = tmp - kbuf + 1;
		} else if (done + size < count) {
			pr_warning("Line length is too long: "
				   "Should be less than %d.", WRITE_BUFSIZE);
			ret = -EINVAL;
			goto out;
		}
		done += size;
		/* Remove comments */
		tmp = strchr(kbuf, '#');

		if (tmp)
			*tmp = '\0';

		ret = command_trace_probe(kbuf);
		if (ret)
			goto out;
	}
	ret = done;
out:
	kfree(kbuf);
	return ret;
}
static const struct file_operations kprobe_events_ops = {
	.owner          = THIS_MODULE,
	.open           = probes_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
	.write          = probes_write,
};
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
	struct trace_probe *tp = v;

	seq_printf(m, " %-44s %15lu %15lu\n", tp->call.name, tp->nhit,
		   tp->rp.kp.nmissed);

	return 0;
}

static const struct seq_operations profile_seq_op = {
	.start  = probes_seq_start,
	.next   = probes_seq_next,
	.stop   = probes_seq_stop,
	.show   = probes_profile_seq_show
};

static int profile_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &profile_seq_op);
}
static const struct file_operations kprobe_profile_ops = {
	.owner          = THIS_MODULE,
	.open           = profile_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = seq_release,
};
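
/*
 * Illustrative kprobe_profile output (the row values here are made up):
 * one line per probe, printing the event name, hit count, and missed
 * count, as formatted by probes_profile_seq_show() above:
 *
 *	 myprobe                                                10           0
 */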
/* Kprobe handler */
static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct kprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	tp->nhit++;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
	return 0;
}
/* Kretprobe handler */
static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
					  struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct kretprobe_trace_entry *entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size, i, pc;
	unsigned long irq_flags;
	struct ftrace_event_call *call = &tp->call;

	local_save_flags(irq_flags);
	pc = preempt_count();

	size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);

	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
						  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);

	if (!filter_current_check_discard(buffer, call, entry, event))
		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);

	return 0;
}
/* Event entry printers */
enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags)
{
	struct kprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags)
{
	struct kretprobe_trace_entry *field;
	struct trace_seq *s = &iter->seq;
	struct trace_event *event;
	struct trace_probe *tp;
	int i;

	field = (struct kretprobe_trace_entry *)iter->ent;
	event = ftrace_find_event(field->ent.type);
	tp = container_of(event, struct trace_probe, event);

	if (!trace_seq_printf(s, "%s: (", tp->call.name))
		goto partial;

	if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, " <- "))
		goto partial;

	if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
		goto partial;

	if (!trace_seq_puts(s, ")"))
		goto partial;

	for (i = 0; i < field->nargs; i++)
		if (!trace_seq_printf(s, " %s=%lx",
				      tp->args[i].name, field->args[i]))
			goto partial;

	if (!trace_seq_puts(s, "\n"))
		goto partial;

	return TRACE_TYPE_HANDLED;
partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
static int probe_event_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_TRACE;
	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}
static void probe_event_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_TRACE;
	if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
static int probe_event_raw_init(struct ftrace_event_call *event_call)
{
	INIT_LIST_HEAD(&event_call->fields);

	return 0;
}
#undef DEFINE_FIELD
#define DEFINE_FIELD(type, item, name, is_signed)			\
	do {								\
		ret = trace_define_field(event_call, #type, name,	\
					 offsetof(typeof(field), item),	\
					 sizeof(field.item), is_signed, \
					 FILTER_OTHER);			\
		if (ret)						\
			return ret;					\
	} while (0)
static int kprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, ip, "ip", 0);
	DEFINE_FIELD(int, nargs, "nargs", 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
static int kretprobe_event_define_fields(struct ftrace_event_call *event_call)
{
	int ret, i;
	struct kretprobe_trace_entry field;
	struct trace_probe *tp = (struct trace_probe *)event_call->data;

	ret = trace_define_common_fields(event_call);
	if (ret)
		return ret;

	DEFINE_FIELD(unsigned long, func, "func", 0);
	DEFINE_FIELD(unsigned long, ret_ip, "ret_ip", 0);
	DEFINE_FIELD(int, nargs, "nargs", 1);
	/* Set argument names as fields */
	for (i = 0; i < tp->nr_args; i++)
		DEFINE_FIELD(unsigned long, args[i], tp->args[i].name, 0);
	return 0;
}
static int __probe_event_show_format(struct trace_seq *s,
				     struct trace_probe *tp, const char *fmt,
				     const char *arg)
{
	int i;

	/* Show format */
	if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, " %s=%%lx", tp->args[i].name))
			return 0;

	if (!trace_seq_printf(s, "\", %s", arg))
		return 0;

	for (i = 0; i < tp->nr_args; i++)
		if (!trace_seq_printf(s, ", REC->%s", tp->args[i].name))
			return 0;

	return trace_seq_puts(s, "\n");
}
#undef SHOW_FIELD
#define SHOW_FIELD(type, item, name)					\
	do {								\
		ret = trace_seq_printf(s, "\tfield: " #type " %s;\t"	\
				"offset:%u;\tsize:%u;\n", name,		\
				(unsigned int)offsetof(typeof(field), item),\
				(unsigned int)sizeof(type));		\
		if (!ret)						\
			return 0;					\
	} while (0)
static int kprobe_event_show_format(struct ftrace_event_call *call,
				    struct trace_seq *s)
{
	struct kprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, ip, "ip");
	SHOW_FIELD(int, nargs, "nargs");

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx)", "REC->ip");
}
static int kretprobe_event_show_format(struct ftrace_event_call *call,
				       struct trace_seq *s)
{
	struct kretprobe_trace_entry field __attribute__((unused));
	int ret, i;
	struct trace_probe *tp = (struct trace_probe *)call->data;

	SHOW_FIELD(unsigned long, func, "func");
	SHOW_FIELD(unsigned long, ret_ip, "ret_ip");
	SHOW_FIELD(int, nargs, "nargs");

	/* Show fields */
	for (i = 0; i < tp->nr_args; i++)
		SHOW_FIELD(unsigned long, args[i], tp->args[i].name);
	trace_seq_puts(s, "\n");

	return __probe_event_show_format(s, tp, "(%lx <- %lx)",
					 "REC->func, REC->ret_ip");
}
#ifdef CONFIG_EVENT_PROFILE

/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
					 struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
	struct ftrace_event_call *call = &tp->call;
	struct kprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *raw_data;

	pc = preempt_count();
	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);
	__cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;

	raw_data = per_cpu_ptr(raw_data, __cpu);
	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->ip = (unsigned long)kp->addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ip, 1, entry, size);
end:
	local_irq_restore(irq_flags);

	return 0;
}
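
/*
 * Worked example of the size arithmetic above (illustrative): with two
 * arguments, __size = SIZEOF_KPROBE_TRACE_ENTRY(2).  ALIGN(__size +
 * sizeof(u32), sizeof(u64)) pads the record plus perf's u32 size header
 * up to an 8-byte boundary, and the final "size -= sizeof(u32)" leaves
 * the payload length excluding that header.  The 0ULL store above clears
 * the alignment padding so no stale bytes ever leak to userspace.
 */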
/* Kretprobe profile handler */
static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
					    struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
	struct ftrace_event_call *call = &tp->call;
	struct kretprobe_trace_entry *entry;
	struct trace_entry *ent;
	int size, __size, i, pc, __cpu;
	unsigned long irq_flags;
	char *raw_data;

	pc = preempt_count();
	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
	size = ALIGN(__size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return 0;

	/*
	 * Protect the non nmi buffer
	 * This also protects the rcu read side
	 */
	local_irq_save(irq_flags);
	__cpu = smp_processor_id();

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data)
		goto end;

	raw_data = per_cpu_ptr(raw_data, __cpu);
	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
	entry = (struct kretprobe_trace_entry *)raw_data;
	ent = &entry->ent;

	tracing_generic_entry_update(ent, irq_flags, pc);
	ent->type = call->id;
	entry->nargs = tp->nr_args;
	entry->func = (unsigned long)tp->rp.kp.addr;
	entry->ret_ip = (unsigned long)ri->ret_addr;
	for (i = 0; i < tp->nr_args; i++)
		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
end:
	local_irq_restore(irq_flags);

	return 0;
}
static int probe_profile_enable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags |= TP_FLAG_PROFILE;

	if (probe_is_return(tp))
		return enable_kretprobe(&tp->rp);
	else
		return enable_kprobe(&tp->rp.kp);
}

static void probe_profile_disable(struct ftrace_event_call *call)
{
	struct trace_probe *tp = (struct trace_probe *)call->data;

	tp->flags &= ~TP_FLAG_PROFILE;

	if (!(tp->flags & TP_FLAG_TRACE)) {
		if (probe_is_return(tp))
			disable_kretprobe(&tp->rp);
		else
			disable_kprobe(&tp->rp.kp);
	}
}
#endif	/* CONFIG_EVENT_PROFILE */
static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}

static __kprobes
int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);

	if (tp->flags & TP_FLAG_TRACE)
		kretprobe_trace_func(ri, regs);
#ifdef CONFIG_EVENT_PROFILE
	if (tp->flags & TP_FLAG_PROFILE)
		kretprobe_profile_func(ri, regs);
#endif	/* CONFIG_EVENT_PROFILE */
	return 0;	/* We don't tweak the kernel, so just return 0 */
}
static int register_probe_event(struct trace_probe *tp)
{
	struct ftrace_event_call *call = &tp->call;
	int ret;

	/* Initialize ftrace_event_call */
	if (probe_is_return(tp)) {
		tp->event.trace = print_kretprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kretprobe_event_show_format;
		call->define_fields = kretprobe_event_define_fields;
	} else {
		tp->event.trace = print_kprobe_event;
		call->raw_init = probe_event_raw_init;
		call->show_format = kprobe_event_show_format;
		call->define_fields = kprobe_event_define_fields;
	}
	call->event = &tp->event;
	call->id = register_ftrace_event(&tp->event);
	if (!call->id)
		return -ENODEV;
	call->enabled = 0;
	call->regfunc = probe_event_enable;
	call->unregfunc = probe_event_disable;

#ifdef CONFIG_EVENT_PROFILE
	atomic_set(&call->profile_count, -1);
	call->profile_enable = probe_profile_enable;
	call->profile_disable = probe_profile_disable;
#endif
	call->data = tp;
	ret = trace_add_event_call(call);
	if (ret) {
		pr_info("Failed to register kprobe event: %s\n", call->name);
		unregister_ftrace_event(&tp->event);
	}
	return ret;
}
static void unregister_probe_event(struct trace_probe *tp)
{
	/* tp->event is unregistered in trace_remove_event_call() */
	trace_remove_event_call(&tp->call);
}
/* Make a debugfs interface for controlling probe points */
static __init int init_kprobe_trace(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	entry = debugfs_create_file("kprobe_events", 0644, d_tracer,
				    NULL, &kprobe_events_ops);

	/* Event list interface */
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_events' entry\n");

	/* Profile interface */
	entry = debugfs_create_file("kprobe_profile", 0444, d_tracer,
				    NULL, &kprobe_profile_ops);

	if (!entry)
		pr_warning("Could not create debugfs "
			   "'kprobe_profile' entry\n");
	return 0;
}
fs_initcall(init_kprobe_trace);
#ifdef CONFIG_FTRACE_STARTUP_TEST

static int kprobe_trace_selftest_target(int a1, int a2, int a3,
					int a4, int a5, int a6)
{
	return a1 + a2 + a3 + a4 + a5 + a6;
}

static __init int kprobe_trace_self_tests_init(void)
{
	int ret;
	int (*target)(int, int, int, int, int, int);

	target = kprobe_trace_selftest_target;

	pr_info("Testing kprobe tracing: ");

	ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target "
				  "a1 a2 a3 a4 a5 a6");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function entry\n");

	ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target "
				  "rv ra");
	if (WARN_ON_ONCE(ret))
		pr_warning("error enabling function return\n");

	ret = target(1, 2, 3, 4, 5, 6);

	cleanup_all_probes();

	pr_cont("OK\n");
	return 0;
}

late_initcall(kprobe_trace_self_tests_init);

#endif	/* CONFIG_FTRACE_STARTUP_TEST */