2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/debugfs.h>
21 #include <linux/hardirq.h>
22 #include <linux/kthread.h>
23 #include <linux/uaccess.h>
24 #include <linux/kprobes.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/ctype.h>
28 #include <linux/list.h>
30 #include <asm/ftrace.h>
34 #define FTRACE_WARN_ON(cond) \
40 #define FTRACE_WARN_ON_ONCE(cond) \
42 if (WARN_ON_ONCE(cond)) \
46 /* ftrace_enabled is a method to turn ftrace on or off */
47 int ftrace_enabled __read_mostly
;
48 static int last_ftrace_enabled
;
50 /* ftrace_pid_trace >= 0 will only trace threads with this pid */
51 static int ftrace_pid_trace
= -1;
53 /* Quick disabling of function tracer. */
54 int function_trace_stop
;
57 * ftrace_disabled is set when an anomaly is discovered.
58 * ftrace_disabled is much stronger than ftrace_enabled.
60 static int ftrace_disabled __read_mostly
;
62 static DEFINE_SPINLOCK(ftrace_lock
);
63 static DEFINE_MUTEX(ftrace_sysctl_lock
);
64 static DEFINE_MUTEX(ftrace_start_lock
);
66 static struct ftrace_ops ftrace_list_end __read_mostly
=
71 static struct ftrace_ops
*ftrace_list __read_mostly
= &ftrace_list_end
;
72 ftrace_func_t ftrace_trace_function __read_mostly
= ftrace_stub
;
73 ftrace_func_t __ftrace_trace_function __read_mostly
= ftrace_stub
;
74 ftrace_func_t ftrace_pid_function __read_mostly
= ftrace_stub
;
76 static void ftrace_list_func(unsigned long ip
, unsigned long parent_ip
)
78 struct ftrace_ops
*op
= ftrace_list
;
80 /* in case someone actually ports this to alpha! */
81 read_barrier_depends();
83 while (op
!= &ftrace_list_end
) {
85 read_barrier_depends();
86 op
->func(ip
, parent_ip
);
91 static void ftrace_pid_func(unsigned long ip
, unsigned long parent_ip
)
93 if (current
->pid
!= ftrace_pid_trace
)
96 ftrace_pid_function(ip
, parent_ip
);
99 static void set_ftrace_pid_function(ftrace_func_t func
)
101 /* do not set ftrace_pid_function to itself! */
102 if (func
!= ftrace_pid_func
)
103 ftrace_pid_function
= func
;
107 * clear_ftrace_function - reset the ftrace function
109 * This NULLs the ftrace function and in essence stops
110 * tracing. There may be lag
112 void clear_ftrace_function(void)
114 ftrace_trace_function
= ftrace_stub
;
115 __ftrace_trace_function
= ftrace_stub
;
116 ftrace_pid_function
= ftrace_stub
;
119 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121 * For those archs that do not test ftrace_trace_stop in their
122 * mcount call site, we need to do it from C.
124 static void ftrace_test_stop_func(unsigned long ip
, unsigned long parent_ip
)
126 if (function_trace_stop
)
129 __ftrace_trace_function(ip
, parent_ip
);
133 static int __register_ftrace_function(struct ftrace_ops
*ops
)
135 /* should not be called from interrupt context */
136 spin_lock(&ftrace_lock
);
138 ops
->next
= ftrace_list
;
140 * We are entering ops into the ftrace_list but another
141 * CPU might be walking that list. We need to make sure
142 * the ops->next pointer is valid before another CPU sees
143 * the ops pointer included into the ftrace_list.
148 if (ftrace_enabled
) {
151 if (ops
->next
== &ftrace_list_end
)
154 func
= ftrace_list_func
;
156 if (ftrace_pid_trace
>= 0) {
157 set_ftrace_pid_function(func
);
158 func
= ftrace_pid_func
;
162 * For one func, simply call it directly.
163 * For more than one func, call the chain.
165 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
166 ftrace_trace_function
= func
;
168 __ftrace_trace_function
= func
;
169 ftrace_trace_function
= ftrace_test_stop_func
;
173 spin_unlock(&ftrace_lock
);
178 static int __unregister_ftrace_function(struct ftrace_ops
*ops
)
180 struct ftrace_ops
**p
;
183 /* should not be called from interrupt context */
184 spin_lock(&ftrace_lock
);
187 * If we are removing the last function, then simply point
188 * to the ftrace_stub.
190 if (ftrace_list
== ops
&& ops
->next
== &ftrace_list_end
) {
191 ftrace_trace_function
= ftrace_stub
;
192 ftrace_list
= &ftrace_list_end
;
196 for (p
= &ftrace_list
; *p
!= &ftrace_list_end
; p
= &(*p
)->next
)
207 if (ftrace_enabled
) {
208 /* If we only have one func left, then call that directly */
209 if (ftrace_list
->next
== &ftrace_list_end
) {
210 ftrace_func_t func
= ftrace_list
->func
;
212 if (ftrace_pid_trace
>= 0) {
213 set_ftrace_pid_function(func
);
214 func
= ftrace_pid_func
;
216 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
217 ftrace_trace_function
= func
;
219 __ftrace_trace_function
= func
;
225 spin_unlock(&ftrace_lock
);
230 static void ftrace_update_pid_func(void)
/*
 * Re-select the live trace callback after the traced-pid setting
 * (ftrace_pid_trace) changed.
 * NOTE(review): several interior lines of this function are missing
 * from this extraction (local declarations, braces, the else-branch
 * body); the comments below describe only what the surviving
 * fragments show.
 */
234 /* should not be called from interrupt context */
235 spin_lock(&ftrace_lock
);
/* nothing to update while tracing is stubbed out */
237 if (ftrace_trace_function
== ftrace_stub
)
240 func
= ftrace_trace_function
;
/* pid filtering enabled: interpose ftrace_pid_func in front of func */
242 if (ftrace_pid_trace
>= 0) {
243 set_ftrace_pid_function(func
);
244 func
= ftrace_pid_func
;
/*
 * pid filtering disabled: presumably undoes the interposition when the
 * pid wrapper is currently installed — TODO confirm, lines missing here.
 */
246 if (func
!= ftrace_pid_func
)
249 set_ftrace_pid_function(func
);
/* publish the chosen callback, per-arch mcount-test split as elsewhere */
252 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
253 ftrace_trace_function
= func
;
255 __ftrace_trace_function
= func
;
259 spin_unlock(&ftrace_lock
);
262 #ifdef CONFIG_DYNAMIC_FTRACE
263 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
264 # error Dynamic ftrace depends on MCOUNT_RECORD
268 * Since MCOUNT_ADDR may point to mcount itself, we do not want
269 * to get it confused by reading a reference in the code as we
270 * are parsing on objcopy output of text. Use a variable for
273 static unsigned long mcount_addr
= MCOUNT_ADDR
;
276 FTRACE_ENABLE_CALLS
= (1 << 0),
277 FTRACE_DISABLE_CALLS
= (1 << 1),
278 FTRACE_UPDATE_TRACE_FUNC
= (1 << 2),
279 FTRACE_ENABLE_MCOUNT
= (1 << 3),
280 FTRACE_DISABLE_MCOUNT
= (1 << 4),
281 FTRACE_START_FUNC_RET
= (1 << 5),
282 FTRACE_STOP_FUNC_RET
= (1 << 6),
285 static int ftrace_filtered
;
287 static LIST_HEAD(ftrace_new_addrs
);
289 static DEFINE_MUTEX(ftrace_regex_lock
);
292 struct ftrace_page
*next
;
294 struct dyn_ftrace records
[];
297 #define ENTRIES_PER_PAGE \
298 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
300 /* estimate from running different kernels */
301 #define NR_TO_INIT 10000
303 static struct ftrace_page
*ftrace_pages_start
;
304 static struct ftrace_page
*ftrace_pages
;
306 static struct dyn_ftrace
*ftrace_free_records
;
309 #ifdef CONFIG_KPROBES
311 static int frozen_record_count
;
313 static inline void freeze_record(struct dyn_ftrace
*rec
)
315 if (!(rec
->flags
& FTRACE_FL_FROZEN
)) {
316 rec
->flags
|= FTRACE_FL_FROZEN
;
317 frozen_record_count
++;
321 static inline void unfreeze_record(struct dyn_ftrace
*rec
)
323 if (rec
->flags
& FTRACE_FL_FROZEN
) {
324 rec
->flags
&= ~FTRACE_FL_FROZEN
;
325 frozen_record_count
--;
329 static inline int record_frozen(struct dyn_ftrace
*rec
)
331 return rec
->flags
& FTRACE_FL_FROZEN
;
334 # define freeze_record(rec) ({ 0; })
335 # define unfreeze_record(rec) ({ 0; })
336 # define record_frozen(rec) ({ 0; })
337 #endif /* CONFIG_KPROBES */
339 static void ftrace_free_rec(struct dyn_ftrace
*rec
)
341 rec
->ip
= (unsigned long)ftrace_free_records
;
342 ftrace_free_records
= rec
;
343 rec
->flags
|= FTRACE_FL_FREE
;
346 void ftrace_release(void *start
, unsigned long size
)
348 struct dyn_ftrace
*rec
;
349 struct ftrace_page
*pg
;
350 unsigned long s
= (unsigned long)start
;
351 unsigned long e
= s
+ size
;
354 if (ftrace_disabled
|| !start
)
357 /* should not be called from interrupt context */
358 spin_lock(&ftrace_lock
);
360 for (pg
= ftrace_pages_start
; pg
; pg
= pg
->next
) {
361 for (i
= 0; i
< pg
->index
; i
++) {
362 rec
= &pg
->records
[i
];
364 if ((rec
->ip
>= s
) && (rec
->ip
< e
))
365 ftrace_free_rec(rec
);
368 spin_unlock(&ftrace_lock
);
371 static struct dyn_ftrace
*ftrace_alloc_dyn_node(unsigned long ip
)
373 struct dyn_ftrace
*rec
;
375 /* First check for freed records */
376 if (ftrace_free_records
) {
377 rec
= ftrace_free_records
;
379 if (unlikely(!(rec
->flags
& FTRACE_FL_FREE
))) {
380 FTRACE_WARN_ON_ONCE(1);
381 ftrace_free_records
= NULL
;
385 ftrace_free_records
= (void *)rec
->ip
;
386 memset(rec
, 0, sizeof(*rec
));
390 if (ftrace_pages
->index
== ENTRIES_PER_PAGE
) {
391 if (!ftrace_pages
->next
) {
392 /* allocate another page */
394 (void *)get_zeroed_page(GFP_KERNEL
);
395 if (!ftrace_pages
->next
)
398 ftrace_pages
= ftrace_pages
->next
;
401 return &ftrace_pages
->records
[ftrace_pages
->index
++];
404 static struct dyn_ftrace
*
405 ftrace_record_ip(unsigned long ip
)
407 struct dyn_ftrace
*rec
;
412 rec
= ftrace_alloc_dyn_node(ip
);
418 list_add(&rec
->list
, &ftrace_new_addrs
);
423 static void print_ip_ins(const char *fmt
, unsigned char *p
)
427 printk(KERN_CONT
"%s", fmt
);
429 for (i
= 0; i
< MCOUNT_INSN_SIZE
; i
++)
430 printk(KERN_CONT
"%s%02x", i
? ":" : "", p
[i
]);
433 static void ftrace_bug(int failed
, unsigned long ip
)
437 FTRACE_WARN_ON_ONCE(1);
438 pr_info("ftrace faulted on modifying ");
442 FTRACE_WARN_ON_ONCE(1);
443 pr_info("ftrace failed to modify ");
445 print_ip_ins(" actual: ", (unsigned char *)ip
);
446 printk(KERN_CONT
"\n");
449 FTRACE_WARN_ON_ONCE(1);
450 pr_info("ftrace faulted on writing ");
454 FTRACE_WARN_ON_ONCE(1);
455 pr_info("ftrace faulted on unknown error ");
462 __ftrace_replace_code(struct dyn_ftrace
*rec
, int enable
)
464 unsigned long ip
, fl
;
465 unsigned long ftrace_addr
;
467 ftrace_addr
= (unsigned long)ftrace_caller
;
472 * If this record is not to be traced and
473 * it is not enabled then do nothing.
475 * If this record is not to be traced and
476 * it is enabled then disabled it.
479 if (rec
->flags
& FTRACE_FL_NOTRACE
) {
480 if (rec
->flags
& FTRACE_FL_ENABLED
)
481 rec
->flags
&= ~FTRACE_FL_ENABLED
;
485 } else if (ftrace_filtered
&& enable
) {
490 fl
= rec
->flags
& (FTRACE_FL_FILTER
| FTRACE_FL_ENABLED
);
492 /* Record is filtered and enabled, do nothing */
493 if (fl
== (FTRACE_FL_FILTER
| FTRACE_FL_ENABLED
))
496 /* Record is not filtered and is not enabled do nothing */
500 /* Record is not filtered but enabled, disable it */
501 if (fl
== FTRACE_FL_ENABLED
)
502 rec
->flags
&= ~FTRACE_FL_ENABLED
;
504 /* Otherwise record is filtered but not enabled, enable it */
505 rec
->flags
|= FTRACE_FL_ENABLED
;
507 /* Disable or not filtered */
510 /* if record is enabled, do nothing */
511 if (rec
->flags
& FTRACE_FL_ENABLED
)
514 rec
->flags
|= FTRACE_FL_ENABLED
;
518 /* if record is not enabled do nothing */
519 if (!(rec
->flags
& FTRACE_FL_ENABLED
))
522 rec
->flags
&= ~FTRACE_FL_ENABLED
;
526 if (rec
->flags
& FTRACE_FL_ENABLED
)
527 return ftrace_make_call(rec
, ftrace_addr
);
529 return ftrace_make_nop(NULL
, rec
, ftrace_addr
);
532 static void ftrace_replace_code(int enable
)
535 struct dyn_ftrace
*rec
;
536 struct ftrace_page
*pg
;
538 for (pg
= ftrace_pages_start
; pg
; pg
= pg
->next
) {
539 for (i
= 0; i
< pg
->index
; i
++) {
540 rec
= &pg
->records
[i
];
543 * Skip over free records and records that have
546 if (rec
->flags
& FTRACE_FL_FREE
||
547 rec
->flags
& FTRACE_FL_FAILED
)
550 /* ignore updates to this record's mcount site */
551 if (get_kprobe((void *)rec
->ip
)) {
555 unfreeze_record(rec
);
558 failed
= __ftrace_replace_code(rec
, enable
);
559 if (failed
&& (rec
->flags
& FTRACE_FL_CONVERTED
)) {
560 rec
->flags
|= FTRACE_FL_FAILED
;
561 if ((system_state
== SYSTEM_BOOTING
) ||
562 !core_kernel_text(rec
->ip
)) {
563 ftrace_free_rec(rec
);
565 ftrace_bug(failed
, rec
->ip
);
572 ftrace_code_disable(struct module
*mod
, struct dyn_ftrace
*rec
)
579 ret
= ftrace_make_nop(mod
, rec
, mcount_addr
);
582 rec
->flags
|= FTRACE_FL_FAILED
;
588 static int __ftrace_modify_code(void *data
)
592 if (*command
& FTRACE_ENABLE_CALLS
)
593 ftrace_replace_code(1);
594 else if (*command
& FTRACE_DISABLE_CALLS
)
595 ftrace_replace_code(0);
597 if (*command
& FTRACE_UPDATE_TRACE_FUNC
)
598 ftrace_update_ftrace_func(ftrace_trace_function
);
600 if (*command
& FTRACE_START_FUNC_RET
)
601 ftrace_enable_ftrace_graph_caller();
602 else if (*command
& FTRACE_STOP_FUNC_RET
)
603 ftrace_disable_ftrace_graph_caller();
608 static void ftrace_run_update_code(int command
)
610 stop_machine(__ftrace_modify_code
, &command
, NULL
);
613 static ftrace_func_t saved_ftrace_func
;
614 static int ftrace_start_up
;
616 static void ftrace_startup_enable(int command
)
618 if (saved_ftrace_func
!= ftrace_trace_function
) {
619 saved_ftrace_func
= ftrace_trace_function
;
620 command
|= FTRACE_UPDATE_TRACE_FUNC
;
623 if (!command
|| !ftrace_enabled
)
626 ftrace_run_update_code(command
);
629 static void ftrace_startup(int command
)
631 if (unlikely(ftrace_disabled
))
634 mutex_lock(&ftrace_start_lock
);
636 command
|= FTRACE_ENABLE_CALLS
;
638 ftrace_startup_enable(command
);
640 mutex_unlock(&ftrace_start_lock
);
643 static void ftrace_shutdown(int command
)
645 if (unlikely(ftrace_disabled
))
648 mutex_lock(&ftrace_start_lock
);
650 if (!ftrace_start_up
)
651 command
|= FTRACE_DISABLE_CALLS
;
653 if (saved_ftrace_func
!= ftrace_trace_function
) {
654 saved_ftrace_func
= ftrace_trace_function
;
655 command
|= FTRACE_UPDATE_TRACE_FUNC
;
658 if (!command
|| !ftrace_enabled
)
661 ftrace_run_update_code(command
);
663 mutex_unlock(&ftrace_start_lock
);
666 static void ftrace_startup_sysctl(void)
668 int command
= FTRACE_ENABLE_MCOUNT
;
670 if (unlikely(ftrace_disabled
))
673 mutex_lock(&ftrace_start_lock
);
674 /* Force update next time */
675 saved_ftrace_func
= NULL
;
676 /* ftrace_start_up is true if we want ftrace running */
678 command
|= FTRACE_ENABLE_CALLS
;
680 ftrace_run_update_code(command
);
681 mutex_unlock(&ftrace_start_lock
);
684 static void ftrace_shutdown_sysctl(void)
686 int command
= FTRACE_DISABLE_MCOUNT
;
688 if (unlikely(ftrace_disabled
))
691 mutex_lock(&ftrace_start_lock
);
692 /* ftrace_start_up is true if ftrace is running */
694 command
|= FTRACE_DISABLE_CALLS
;
696 ftrace_run_update_code(command
);
697 mutex_unlock(&ftrace_start_lock
);
700 static cycle_t ftrace_update_time
;
701 static unsigned long ftrace_update_cnt
;
702 unsigned long ftrace_update_tot_cnt
;
704 static int ftrace_update_code(struct module
*mod
)
706 struct dyn_ftrace
*p
, *t
;
709 start
= ftrace_now(raw_smp_processor_id());
710 ftrace_update_cnt
= 0;
712 list_for_each_entry_safe(p
, t
, &ftrace_new_addrs
, list
) {
714 /* If something went wrong, bail without enabling anything */
715 if (unlikely(ftrace_disabled
))
718 list_del_init(&p
->list
);
720 /* convert record (i.e, patch mcount-call with NOP) */
721 if (ftrace_code_disable(mod
, p
)) {
722 p
->flags
|= FTRACE_FL_CONVERTED
;
728 stop
= ftrace_now(raw_smp_processor_id());
729 ftrace_update_time
= stop
- start
;
730 ftrace_update_tot_cnt
+= ftrace_update_cnt
;
735 static int __init
ftrace_dyn_table_alloc(unsigned long num_to_init
)
737 struct ftrace_page
*pg
;
741 /* allocate a few pages */
742 ftrace_pages_start
= (void *)get_zeroed_page(GFP_KERNEL
);
743 if (!ftrace_pages_start
)
747 * Allocate a few more pages.
749 * TODO: have some parser search vmlinux before
750 * final linking to find all calls to ftrace.
752 * a) know how many pages to allocate.
754 * b) set up the table then.
756 * The dynamic code is still necessary for
760 pg
= ftrace_pages
= ftrace_pages_start
;
762 cnt
= num_to_init
/ ENTRIES_PER_PAGE
;
763 pr_info("ftrace: allocating %ld entries in %d pages\n",
764 num_to_init
, cnt
+ 1);
766 for (i
= 0; i
< cnt
; i
++) {
767 pg
->next
= (void *)get_zeroed_page(GFP_KERNEL
);
769 /* If we fail, we'll try later anyway */
780 FTRACE_ITER_FILTER
= (1 << 0),
781 FTRACE_ITER_CONT
= (1 << 1),
782 FTRACE_ITER_NOTRACE
= (1 << 2),
783 FTRACE_ITER_FAILURES
= (1 << 3),
786 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
788 struct ftrace_iterator
{
789 struct ftrace_page
*pg
;
792 unsigned char buffer
[FTRACE_BUFF_MAX
+1];
798 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
800 struct ftrace_iterator
*iter
= m
->private;
801 struct dyn_ftrace
*rec
= NULL
;
805 /* should not be called from interrupt context */
806 spin_lock(&ftrace_lock
);
808 if (iter
->idx
>= iter
->pg
->index
) {
809 if (iter
->pg
->next
) {
810 iter
->pg
= iter
->pg
->next
;
817 rec
= &iter
->pg
->records
[iter
->idx
++];
818 if ((rec
->flags
& FTRACE_FL_FREE
) ||
820 (!(iter
->flags
& FTRACE_ITER_FAILURES
) &&
821 (rec
->flags
& FTRACE_FL_FAILED
)) ||
823 ((iter
->flags
& FTRACE_ITER_FAILURES
) &&
824 !(rec
->flags
& FTRACE_FL_FAILED
)) ||
826 ((iter
->flags
& FTRACE_ITER_FILTER
) &&
827 !(rec
->flags
& FTRACE_FL_FILTER
)) ||
829 ((iter
->flags
& FTRACE_ITER_NOTRACE
) &&
830 !(rec
->flags
& FTRACE_FL_NOTRACE
))) {
835 spin_unlock(&ftrace_lock
);
840 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
842 struct ftrace_iterator
*iter
= m
->private;
852 p
= t_next(m
, p
, pos
);
857 static void t_stop(struct seq_file
*m
, void *p
)
861 static int t_show(struct seq_file
*m
, void *v
)
863 struct dyn_ftrace
*rec
= v
;
864 char str
[KSYM_SYMBOL_LEN
];
869 kallsyms_lookup(rec
->ip
, NULL
, NULL
, NULL
, str
);
871 seq_printf(m
, "%s\n", str
);
876 static struct seq_operations show_ftrace_seq_ops
= {
884 ftrace_avail_open(struct inode
*inode
, struct file
*file
)
886 struct ftrace_iterator
*iter
;
889 if (unlikely(ftrace_disabled
))
892 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
896 iter
->pg
= ftrace_pages_start
;
898 ret
= seq_open(file
, &show_ftrace_seq_ops
);
900 struct seq_file
*m
= file
->private_data
;
910 int ftrace_avail_release(struct inode
*inode
, struct file
*file
)
912 struct seq_file
*m
= (struct seq_file
*)file
->private_data
;
913 struct ftrace_iterator
*iter
= m
->private;
915 seq_release(inode
, file
);
922 ftrace_failures_open(struct inode
*inode
, struct file
*file
)
926 struct ftrace_iterator
*iter
;
928 ret
= ftrace_avail_open(inode
, file
);
930 m
= (struct seq_file
*)file
->private_data
;
931 iter
= (struct ftrace_iterator
*)m
->private;
932 iter
->flags
= FTRACE_ITER_FAILURES
;
939 static void ftrace_filter_reset(int enable
)
941 struct ftrace_page
*pg
;
942 struct dyn_ftrace
*rec
;
943 unsigned long type
= enable
? FTRACE_FL_FILTER
: FTRACE_FL_NOTRACE
;
946 /* should not be called from interrupt context */
947 spin_lock(&ftrace_lock
);
950 pg
= ftrace_pages_start
;
952 for (i
= 0; i
< pg
->index
; i
++) {
953 rec
= &pg
->records
[i
];
954 if (rec
->flags
& FTRACE_FL_FAILED
)
960 spin_unlock(&ftrace_lock
);
964 ftrace_regex_open(struct inode
*inode
, struct file
*file
, int enable
)
966 struct ftrace_iterator
*iter
;
969 if (unlikely(ftrace_disabled
))
972 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
976 mutex_lock(&ftrace_regex_lock
);
977 if ((file
->f_mode
& FMODE_WRITE
) &&
978 !(file
->f_flags
& O_APPEND
))
979 ftrace_filter_reset(enable
);
981 if (file
->f_mode
& FMODE_READ
) {
982 iter
->pg
= ftrace_pages_start
;
983 iter
->flags
= enable
? FTRACE_ITER_FILTER
:
986 ret
= seq_open(file
, &show_ftrace_seq_ops
);
988 struct seq_file
*m
= file
->private_data
;
993 file
->private_data
= iter
;
994 mutex_unlock(&ftrace_regex_lock
);
/* Open set_ftrace_filter: regex open in filter mode (enable = 1). */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}
/* Open set_ftrace_notrace: regex open in notrace mode (enable = 0). */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
1012 ftrace_regex_read(struct file
*file
, char __user
*ubuf
,
1013 size_t cnt
, loff_t
*ppos
)
1015 if (file
->f_mode
& FMODE_READ
)
1016 return seq_read(file
, ubuf
, cnt
, ppos
);
1022 ftrace_regex_lseek(struct file
*file
, loff_t offset
, int origin
)
1026 if (file
->f_mode
& FMODE_READ
)
1027 ret
= seq_lseek(file
, offset
, origin
);
1029 file
->f_pos
= ret
= 1;
1042 ftrace_match(unsigned char *buff
, int len
, int enable
)
1044 char str
[KSYM_SYMBOL_LEN
];
1045 char *search
= NULL
;
1046 struct ftrace_page
*pg
;
1047 struct dyn_ftrace
*rec
;
1048 int type
= MATCH_FULL
;
1049 unsigned long flag
= enable
? FTRACE_FL_FILTER
: FTRACE_FL_NOTRACE
;
1050 unsigned i
, match
= 0, search_len
= 0;
1052 for (i
= 0; i
< len
; i
++) {
1053 if (buff
[i
] == '*') {
1055 search
= buff
+ i
+ 1;
1056 type
= MATCH_END_ONLY
;
1057 search_len
= len
- (i
+ 1);
1059 if (type
== MATCH_END_ONLY
) {
1060 type
= MATCH_MIDDLE_ONLY
;
1063 type
= MATCH_FRONT_ONLY
;
1071 /* should not be called from interrupt context */
1072 spin_lock(&ftrace_lock
);
1074 ftrace_filtered
= 1;
1075 pg
= ftrace_pages_start
;
1077 for (i
= 0; i
< pg
->index
; i
++) {
1081 rec
= &pg
->records
[i
];
1082 if (rec
->flags
& FTRACE_FL_FAILED
)
1084 kallsyms_lookup(rec
->ip
, NULL
, NULL
, NULL
, str
);
1087 if (strcmp(str
, buff
) == 0)
1090 case MATCH_FRONT_ONLY
:
1091 if (memcmp(str
, buff
, match
) == 0)
1094 case MATCH_MIDDLE_ONLY
:
1095 if (strstr(str
, search
))
1098 case MATCH_END_ONLY
:
1099 ptr
= strstr(str
, search
);
1100 if (ptr
&& (ptr
[search_len
] == 0))
1109 spin_unlock(&ftrace_lock
);
1113 ftrace_regex_write(struct file
*file
, const char __user
*ubuf
,
1114 size_t cnt
, loff_t
*ppos
, int enable
)
1116 struct ftrace_iterator
*iter
;
1121 if (!cnt
|| cnt
< 0)
1124 mutex_lock(&ftrace_regex_lock
);
1126 if (file
->f_mode
& FMODE_READ
) {
1127 struct seq_file
*m
= file
->private_data
;
1130 iter
= file
->private_data
;
1133 iter
->flags
&= ~FTRACE_ITER_CONT
;
1134 iter
->buffer_idx
= 0;
1137 ret
= get_user(ch
, ubuf
++);
1143 if (!(iter
->flags
& ~FTRACE_ITER_CONT
)) {
1144 /* skip white space */
1145 while (cnt
&& isspace(ch
)) {
1146 ret
= get_user(ch
, ubuf
++);
1154 file
->f_pos
+= read
;
1159 iter
->buffer_idx
= 0;
1162 while (cnt
&& !isspace(ch
)) {
1163 if (iter
->buffer_idx
< FTRACE_BUFF_MAX
)
1164 iter
->buffer
[iter
->buffer_idx
++] = ch
;
1169 ret
= get_user(ch
, ubuf
++);
1178 iter
->buffer
[iter
->buffer_idx
] = 0;
1179 ftrace_match(iter
->buffer
, iter
->buffer_idx
, enable
);
1180 iter
->buffer_idx
= 0;
1182 iter
->flags
|= FTRACE_ITER_CONT
;
1185 file
->f_pos
+= read
;
1189 mutex_unlock(&ftrace_regex_lock
);
1195 ftrace_filter_write(struct file
*file
, const char __user
*ubuf
,
1196 size_t cnt
, loff_t
*ppos
)
1198 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 1);
1202 ftrace_notrace_write(struct file
*file
, const char __user
*ubuf
,
1203 size_t cnt
, loff_t
*ppos
)
1205 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 0);
1209 ftrace_set_regex(unsigned char *buf
, int len
, int reset
, int enable
)
1211 if (unlikely(ftrace_disabled
))
1214 mutex_lock(&ftrace_regex_lock
);
1216 ftrace_filter_reset(enable
);
1218 ftrace_match(buf
, len
, enable
);
1219 mutex_unlock(&ftrace_regex_lock
);
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1252 ftrace_regex_release(struct inode
*inode
, struct file
*file
, int enable
)
1254 struct seq_file
*m
= (struct seq_file
*)file
->private_data
;
1255 struct ftrace_iterator
*iter
;
1257 mutex_lock(&ftrace_regex_lock
);
1258 if (file
->f_mode
& FMODE_READ
) {
1261 seq_release(inode
, file
);
1263 iter
= file
->private_data
;
1265 if (iter
->buffer_idx
) {
1267 iter
->buffer
[iter
->buffer_idx
] = 0;
1268 ftrace_match(iter
->buffer
, iter
->buffer_idx
, enable
);
1271 mutex_lock(&ftrace_sysctl_lock
);
1272 mutex_lock(&ftrace_start_lock
);
1273 if (ftrace_start_up
&& ftrace_enabled
)
1274 ftrace_run_update_code(FTRACE_ENABLE_CALLS
);
1275 mutex_unlock(&ftrace_start_lock
);
1276 mutex_unlock(&ftrace_sysctl_lock
);
1279 mutex_unlock(&ftrace_regex_lock
);
/* Release set_ftrace_filter: regex release in filter mode (enable = 1). */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}
/* Release set_ftrace_notrace: regex release in notrace mode (enable = 0). */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1295 static struct file_operations ftrace_avail_fops
= {
1296 .open
= ftrace_avail_open
,
1298 .llseek
= seq_lseek
,
1299 .release
= ftrace_avail_release
,
1302 static struct file_operations ftrace_failures_fops
= {
1303 .open
= ftrace_failures_open
,
1305 .llseek
= seq_lseek
,
1306 .release
= ftrace_avail_release
,
1309 static struct file_operations ftrace_filter_fops
= {
1310 .open
= ftrace_filter_open
,
1311 .read
= ftrace_regex_read
,
1312 .write
= ftrace_filter_write
,
1313 .llseek
= ftrace_regex_lseek
,
1314 .release
= ftrace_filter_release
,
1317 static struct file_operations ftrace_notrace_fops
= {
1318 .open
= ftrace_notrace_open
,
1319 .read
= ftrace_regex_read
,
1320 .write
= ftrace_notrace_write
,
1321 .llseek
= ftrace_regex_lseek
,
1322 .release
= ftrace_notrace_release
,
1325 static __init
int ftrace_init_dyn_debugfs(struct dentry
*d_tracer
)
1327 struct dentry
*entry
;
1329 entry
= debugfs_create_file("available_filter_functions", 0444,
1330 d_tracer
, NULL
, &ftrace_avail_fops
);
1332 pr_warning("Could not create debugfs "
1333 "'available_filter_functions' entry\n");
1335 entry
= debugfs_create_file("failures", 0444,
1336 d_tracer
, NULL
, &ftrace_failures_fops
);
1338 pr_warning("Could not create debugfs 'failures' entry\n");
1340 entry
= debugfs_create_file("set_ftrace_filter", 0644, d_tracer
,
1341 NULL
, &ftrace_filter_fops
);
1343 pr_warning("Could not create debugfs "
1344 "'set_ftrace_filter' entry\n");
1346 entry
= debugfs_create_file("set_ftrace_notrace", 0644, d_tracer
,
1347 NULL
, &ftrace_notrace_fops
);
1349 pr_warning("Could not create debugfs "
1350 "'set_ftrace_notrace' entry\n");
1355 static int ftrace_convert_nops(struct module
*mod
,
1356 unsigned long *start
,
1361 unsigned long flags
;
1363 mutex_lock(&ftrace_start_lock
);
1366 addr
= ftrace_call_adjust(*p
++);
1368 * Some architecture linkers will pad between
1369 * the different mcount_loc sections of different
1370 * object files to satisfy alignments.
1371 * Skip any NULL pointers.
1375 ftrace_record_ip(addr
);
1378 /* disable interrupts to prevent kstop machine */
1379 local_irq_save(flags
);
1380 ftrace_update_code(mod
);
1381 local_irq_restore(flags
);
1382 mutex_unlock(&ftrace_start_lock
);
1387 void ftrace_init_module(struct module
*mod
,
1388 unsigned long *start
, unsigned long *end
)
1390 if (ftrace_disabled
|| start
== end
)
1392 ftrace_convert_nops(mod
, start
, end
);
1395 extern unsigned long __start_mcount_loc
[];
1396 extern unsigned long __stop_mcount_loc
[];
1398 void __init
ftrace_init(void)
1400 unsigned long count
, addr
, flags
;
1403 /* Keep the ftrace pointer to the stub */
1404 addr
= (unsigned long)ftrace_stub
;
1406 local_irq_save(flags
);
1407 ftrace_dyn_arch_init(&addr
);
1408 local_irq_restore(flags
);
1410 /* ftrace_dyn_arch_init places the return code in addr */
1414 count
= __stop_mcount_loc
- __start_mcount_loc
;
1416 ret
= ftrace_dyn_table_alloc(count
);
1420 last_ftrace_enabled
= ftrace_enabled
= 1;
1422 ret
= ftrace_convert_nops(NULL
,
1428 ftrace_disabled
= 1;
1433 static int __init
ftrace_nodyn_init(void)
1438 device_initcall(ftrace_nodyn_init
);
1440 static inline int ftrace_init_dyn_debugfs(struct dentry
*d_tracer
) { return 0; }
1441 static inline void ftrace_startup_enable(int command
) { }
1442 /* Keep as macros so we do not need to define the commands */
1443 # define ftrace_startup(command) do { } while (0)
1444 # define ftrace_shutdown(command) do { } while (0)
1445 # define ftrace_startup_sysctl() do { } while (0)
1446 # define ftrace_shutdown_sysctl() do { } while (0)
1447 #endif /* CONFIG_DYNAMIC_FTRACE */
1450 ftrace_pid_read(struct file
*file
, char __user
*ubuf
,
1451 size_t cnt
, loff_t
*ppos
)
1456 if (ftrace_pid_trace
>= 0)
1457 r
= sprintf(buf
, "%u\n", ftrace_pid_trace
);
1459 r
= sprintf(buf
, "no pid\n");
1461 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
1465 ftrace_pid_write(struct file
*filp
, const char __user
*ubuf
,
1466 size_t cnt
, loff_t
*ppos
)
1472 if (cnt
>= sizeof(buf
))
1475 if (copy_from_user(&buf
, ubuf
, cnt
))
1480 ret
= strict_strtol(buf
, 10, &val
);
1484 mutex_lock(&ftrace_start_lock
);
1486 /* disable pid tracing */
1487 if (ftrace_pid_trace
< 0)
1489 ftrace_pid_trace
= -1;
1493 if (ftrace_pid_trace
== val
)
1496 ftrace_pid_trace
= val
;
1499 /* update the function call */
1500 ftrace_update_pid_func();
1501 ftrace_startup_enable(0);
1504 mutex_unlock(&ftrace_start_lock
);
1509 static struct file_operations ftrace_pid_fops
= {
1510 .read
= ftrace_pid_read
,
1511 .write
= ftrace_pid_write
,
1514 static __init
int ftrace_init_debugfs(void)
1516 struct dentry
*d_tracer
;
1517 struct dentry
*entry
;
1519 d_tracer
= tracing_init_dentry();
1523 ftrace_init_dyn_debugfs(d_tracer
);
1525 entry
= debugfs_create_file("set_ftrace_pid", 0644, d_tracer
,
1526 NULL
, &ftrace_pid_fops
);
1528 pr_warning("Could not create debugfs "
1529 "'set_ftrace_pid' entry\n");
1533 fs_initcall(ftrace_init_debugfs
);
1536 * ftrace_kill - kill ftrace
1538 * This function should be used by panic code. It stops ftrace
1539 * but in a not so nice way. If you need to simply kill ftrace
1540 * from a non-atomic section, use ftrace_kill.
1542 void ftrace_kill(void)
1544 ftrace_disabled
= 1;
1546 clear_ftrace_function();
1550 * register_ftrace_function - register a function for profiling
1551 * @ops - ops structure that holds the function for profiling.
1553 * Register a function to be called by all functions in the
1556 * Note: @ops->func and all the functions it calls must be labeled
1557 * with "notrace", otherwise it will go into a
1560 int register_ftrace_function(struct ftrace_ops
*ops
)
1564 if (unlikely(ftrace_disabled
))
1567 mutex_lock(&ftrace_sysctl_lock
);
1569 ret
= __register_ftrace_function(ops
);
1572 mutex_unlock(&ftrace_sysctl_lock
);
1577 * unregister_ftrace_function - unresgister a function for profiling.
1578 * @ops - ops structure that holds the function to unregister
1580 * Unregister a function that was added to be called by ftrace profiling.
1582 int unregister_ftrace_function(struct ftrace_ops
*ops
)
1586 mutex_lock(&ftrace_sysctl_lock
);
1587 ret
= __unregister_ftrace_function(ops
);
1589 mutex_unlock(&ftrace_sysctl_lock
);
1595 ftrace_enable_sysctl(struct ctl_table
*table
, int write
,
1596 struct file
*file
, void __user
*buffer
, size_t *lenp
,
1601 if (unlikely(ftrace_disabled
))
1604 mutex_lock(&ftrace_sysctl_lock
);
1606 ret
= proc_dointvec(table
, write
, file
, buffer
, lenp
, ppos
);
1608 if (ret
|| !write
|| (last_ftrace_enabled
== ftrace_enabled
))
1611 last_ftrace_enabled
= ftrace_enabled
;
1613 if (ftrace_enabled
) {
1615 ftrace_startup_sysctl();
1617 /* we are starting ftrace again */
1618 if (ftrace_list
!= &ftrace_list_end
) {
1619 if (ftrace_list
->next
== &ftrace_list_end
)
1620 ftrace_trace_function
= ftrace_list
->func
;
1622 ftrace_trace_function
= ftrace_list_func
;
1626 /* stopping ftrace calls (just send to ftrace_stub) */
1627 ftrace_trace_function
= ftrace_stub
;
1629 ftrace_shutdown_sysctl();
1633 mutex_unlock(&ftrace_sysctl_lock
);
1637 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1639 static atomic_t ftrace_graph_active
;
1641 /* The callbacks that hook a function */
1642 trace_func_graph_ret_t ftrace_graph_return
=
1643 (trace_func_graph_ret_t
)ftrace_stub
;
1644 trace_func_graph_ent_t ftrace_graph_entry
=
1645 (trace_func_graph_ent_t
)ftrace_stub
;
1647 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
1648 static int alloc_retstack_tasklist(struct ftrace_ret_stack
**ret_stack_list
)
1652 unsigned long flags
;
1653 int start
= 0, end
= FTRACE_RETSTACK_ALLOC_SIZE
;
1654 struct task_struct
*g
, *t
;
1656 for (i
= 0; i
< FTRACE_RETSTACK_ALLOC_SIZE
; i
++) {
1657 ret_stack_list
[i
] = kmalloc(FTRACE_RETFUNC_DEPTH
1658 * sizeof(struct ftrace_ret_stack
),
1660 if (!ret_stack_list
[i
]) {
1668 read_lock_irqsave(&tasklist_lock
, flags
);
1669 do_each_thread(g
, t
) {
1675 if (t
->ret_stack
== NULL
) {
1676 t
->ret_stack
= ret_stack_list
[start
++];
1677 t
->curr_ret_stack
= -1;
1678 atomic_set(&t
->trace_overrun
, 0);
1680 } while_each_thread(g
, t
);
1683 read_unlock_irqrestore(&tasklist_lock
, flags
);
1685 for (i
= start
; i
< end
; i
++)
1686 kfree(ret_stack_list
[i
]);
1690 /* Allocate a return stack for each task */
1691 static int start_graph_tracing(void)
1693 struct ftrace_ret_stack
**ret_stack_list
;
1696 ret_stack_list
= kmalloc(FTRACE_RETSTACK_ALLOC_SIZE
*
1697 sizeof(struct ftrace_ret_stack
*),
1700 if (!ret_stack_list
)
1704 ret
= alloc_retstack_tasklist(ret_stack_list
);
1705 } while (ret
== -EAGAIN
);
1707 kfree(ret_stack_list
);
1711 int register_ftrace_graph(trace_func_graph_ret_t retfunc
,
1712 trace_func_graph_ent_t entryfunc
)
1716 mutex_lock(&ftrace_sysctl_lock
);
1718 atomic_inc(&ftrace_graph_active
);
1719 ret
= start_graph_tracing();
1721 atomic_dec(&ftrace_graph_active
);
1725 ftrace_graph_return
= retfunc
;
1726 ftrace_graph_entry
= entryfunc
;
1728 ftrace_startup(FTRACE_START_FUNC_RET
);
1731 mutex_unlock(&ftrace_sysctl_lock
);
1735 void unregister_ftrace_graph(void)
1737 mutex_lock(&ftrace_sysctl_lock
);
1739 atomic_dec(&ftrace_graph_active
);
1740 ftrace_graph_return
= (trace_func_graph_ret_t
)ftrace_stub
;
1741 ftrace_graph_entry
= (trace_func_graph_ent_t
)ftrace_stub
;
1742 ftrace_shutdown(FTRACE_STOP_FUNC_RET
);
1744 mutex_unlock(&ftrace_sysctl_lock
);
1747 /* Allocate a return stack for newly created task */
1748 void ftrace_graph_init_task(struct task_struct
*t
)
1750 if (atomic_read(&ftrace_graph_active
)) {
1751 t
->ret_stack
= kmalloc(FTRACE_RETFUNC_DEPTH
1752 * sizeof(struct ftrace_ret_stack
),
1756 t
->curr_ret_stack
= -1;
1757 atomic_set(&t
->trace_overrun
, 0);
1759 t
->ret_stack
= NULL
;
1762 void ftrace_graph_exit_task(struct task_struct
*t
)
1764 struct ftrace_ret_stack
*ret_stack
= t
->ret_stack
;
1766 t
->ret_stack
= NULL
;
1767 /* NULL must become visible to IRQs before we free it: */