/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>

#include <asm/ftrace.h>

#include "trace.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/* By default, current tracing type is normal tracing. */
enum ftrace_tracing_type_t ftrace_tracing_type = FTRACE_TYPE_ENTER;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be lag between this call and the moment
 * every CPU stops calling the old function.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
}
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
#else
		if (ops->next == &ftrace_list_end)
			__ftrace_trace_function = ops->func;
		else
			__ftrace_trace_function = ftrace_list_func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
#ifdef CONFIG_DYNAMIC_FTRACE
#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

/*
 * Since MCOUNT_ADDR may point to mcount itself, we do not want
 * to get it confused by reading a reference in the code as we
 * are parsing on objcopy output of text. Use a variable for
 * it instead.
 */
static unsigned long mcount_addr = MCOUNT_ADDR;
enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
};
static int ftrace_filtered;

static LIST_HEAD(ftrace_new_addrs);

static DEFINE_MUTEX(ftrace_regex_lock);
struct ftrace_page {
	struct ftrace_page	*next;
	unsigned long		index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
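/*
 * Rough sanity check of the arithmetic above (a sketch with
 * illustrative sizes, not the real ones on any particular arch):
 * with 4096-byte pages, a 16-byte struct ftrace_page header and a
 * hypothetical 32-byte struct dyn_ftrace, ENTRIES_PER_PAGE would
 * come out to (4096 - 16) / 32 = 127 records per page.
 */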
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->ip = (unsigned long)ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}
void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;
	int i;

	if (ftrace_disabled || !start)
		return;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			if ((rec->ip >= s) && (rec->ip < e))
				ftrace_free_rec(rec);
		}
	}
	spin_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;

	list_add(&rec->list, &ftrace_new_addrs);

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ip, fl;
	unsigned long ftrace_addr;

#ifdef CONFIG_FUNCTION_RET_TRACER
	if (ftrace_tracing_type == FTRACE_TYPE_ENTER)
		ftrace_addr = (unsigned long)ftrace_caller;
	else
		ftrace_addr = (unsigned long)ftrace_return_caller;
#else
	ftrace_addr = (unsigned long)ftrace_caller;
#endif

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled, then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled, then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered and is not enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {

			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	int i, failed;
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];

			/*
			 * Skip over free records and records that have
			 * failed.
			 */
			if (rec->flags & FTRACE_FL_FREE ||
			    rec->flags & FTRACE_FL_FAILED)
				continue;

			/* ignore updates to this record's mcount site */
			if (get_kprobe((void *)rec->ip)) {
				freeze_record(rec);
				continue;
			} else {
				unfreeze_record(rec);
			}

			failed = __ftrace_replace_code(rec, enable);
			if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
				rec->flags |= FTRACE_FL_FAILED;
				if ((system_state == SYSTEM_BOOTING) ||
				    !core_kernel_text(rec->ip)) {
					ftrace_free_rec(rec);
				} else
					ftrace_bug(failed, rec->ip);
			}
		}
	}
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, mcount_addr);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	return 0;
}

static void ftrace_run_update_code(int command)
{
	stop_machine(__ftrace_modify_code, &command, NULL);
}
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;
static DEFINE_MUTEX(ftrace_start_lock);
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_start_lock);
	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftrace_start_lock);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;
static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p, *t;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		list_del_init(&p->list);

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t			pos;
	struct ftrace_page	*pg;
	unsigned		idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}
	spin_unlock(&ftrace_lock);

	iter->pos = *pos;

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos > iter->pos)
		*pos = iter->pos;

	l = *pos;
	p = t_next(m, p, &l);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	ret = seq_printf(m, "%s\n", str);
	if (ret < 0) {
		iter->pos--;
		iter->idx--;
	}

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}
static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i;

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			rec->flags &= ~type;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static ssize_t
ftrace_regex_read(struct file *file, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	if (file->f_mode & FMODE_READ)
		return seq_read(file, ubuf, cnt, ppos);
	else
		return -EPERM;
}
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
					buff[i] = 0;
					break;
				}
			}
		}
	}

	/* should not be called from interrupt context */
	spin_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	spin_unlock(&ftrace_lock);
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
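/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a tracer that only wants scheduler functions traced could do
 *
 *	static char filter[] = "sched_*";
 *
 *	ftrace_set_filter(filter, strlen(filter), 1);
 *
 * Note the buffer must be writable: ftrace_match() may overwrite a
 * '*' with a NUL while parsing, which is why a string literal cannot
 * be passed in directly.
 */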
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftrace_start_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_start_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

	return 0;
}

fs_initcall(ftrace_init_debugfs);
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_start_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_start_lock);

	return 0;
}
void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}
#else

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

# define ftrace_startup()		do { } while (0)
# define ftrace_shutdown()		do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: tracing is disabled for good and the
 * current trace function is simply cleared.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_sysctl_lock);

	if (ftrace_tracing_type == FTRACE_TYPE_RETURN) {
		ret = -EBUSY;
		goto out;
	}

	ret = __register_ftrace_function(ops);
	ftrace_startup();

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_RET_TRACER

static atomic_t ftrace_retfunc_active;

/* The callback that hooks the return of a function */
trace_function_return_t ftrace_function_return =
			(trace_function_return_t)ftrace_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->ret_stack = ret_stack_list[start++];
			t->curr_ret_stack = -1;
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
/* Allocate a return stack for each task */
static int start_return_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	kfree(ret_stack_list);
	return ret;
}
int register_ftrace_return(trace_function_return_t func)
{
	int ret = 0;

	mutex_lock(&ftrace_sysctl_lock);

	/*
	 * Don't launch return tracing if normal function
	 * tracing is already running.
	 */
	if (ftrace_trace_function != ftrace_stub) {
		ret = -EBUSY;
		goto out;
	}
	atomic_inc(&ftrace_retfunc_active);
	ret = start_return_tracing();
	if (ret) {
		atomic_dec(&ftrace_retfunc_active);
		goto out;
	}
	ftrace_tracing_type = FTRACE_TYPE_RETURN;
	ftrace_function_return = func;
	ftrace_startup();

out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
void unregister_ftrace_return(void)
{
	mutex_lock(&ftrace_sysctl_lock);

	atomic_dec(&ftrace_retfunc_active);
	ftrace_function_return = (trace_function_return_t)ftrace_stub;
	ftrace_shutdown();
	/* Restore normal tracing type */
	ftrace_tracing_type = FTRACE_TYPE_ENTER;

	mutex_unlock(&ftrace_sysctl_lock);
}
/* Allocate a return stack for newly created task */
void ftrace_retfunc_init_task(struct task_struct *t)
{
	if (atomic_read(&ftrace_retfunc_active)) {
		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!t->ret_stack)
			return;
		t->curr_ret_stack = -1;
		atomic_set(&t->trace_overrun, 0);
	} else
		t->ret_stack = NULL;
}
void ftrace_retfunc_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}
#endif