/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/hash.h>

#include <trace/sched.h>

#include <asm/ftrace.h>

#include "trace_output.h"
#include "trace_stat.h"
#define FTRACE_WARN_ON(cond)			\
	do {					\
		if (WARN_ON(cond))		\
			ftrace_kill();		\
	} while (0)

#define FTRACE_WARN_ON_ONCE(cond)		\
	do {					\
		if (WARN_ON_ONCE(cond))		\
			ftrace_kill();		\
	} while (0)

/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Quick disabling of function tracer. */
int function_trace_stop;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
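
/*
 * Editorial note (not in the original source): ftrace_trace_function is
 * what the mcount call sites actually invoke. __ftrace_trace_function
 * backs it on architectures that cannot test function_trace_stop from
 * their mcount stub, and ftrace_pid_function is the callback used when
 * tracing is restricted to a specific pid.
 */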
static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
{
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
}
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing.  There may be lag between disabling the
 * function tracing and the functions being traced.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	__ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
 * For those archs that do not test function_trace_stop in their
 * mcount call site, we need to do it from C.
 */
static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
{
	if (function_trace_stop)
		return;

	__ftrace_trace_function(ip, parent_ip);
}
#endif
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		ftrace_func_t func;

		if (ops->next == &ftrace_list_end)
			func = ops->func;
		else
			func = ftrace_list_func;

		if (ftrace_pid_trace) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}

		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
		ftrace_trace_function = func;
#else
		__ftrace_trace_function = func;
		ftrace_trace_function = ftrace_test_stop_func;
#endif
	}

	return 0;
}
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		return 0;
	}

	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list->next == &ftrace_list_end) {
			ftrace_func_t func = ftrace_list->func;

			if (ftrace_pid_trace) {
				set_ftrace_pid_function(func);
				func = ftrace_pid_func;
			}
#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
			ftrace_trace_function = func;
#else
			__ftrace_trace_function = func;
#endif
		}
	}

	return 0;
}
static void ftrace_update_pid_func(void)
{
	ftrace_func_t func;

	if (ftrace_trace_function == ftrace_stub)
		return;

	func = ftrace_trace_function;

	if (ftrace_pid_trace) {
		set_ftrace_pid_function(func);
		func = ftrace_pid_func;
	} else {
		if (func == ftrace_pid_func)
			func = ftrace_pid_function;
	}

#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
	ftrace_trace_function = func;
#else
	__ftrace_trace_function = func;
#endif
}
#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
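
/*
 * Rough sizing example (illustrative, not from the original source):
 * with 4K pages and a ~40-byte ftrace_profile on 64-bit (hlist_node +
 * ip + counter + time), each page holds roughly a hundred records, so
 * the ten pages preallocated below cover about a thousand hot
 * functions before GFP_ATOMIC allocations kick in.
 */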
/* TODO: make these percpu, to prevent cache line bouncing */
static struct ftrace_profile_page *profile_pages_start;
static struct ftrace_profile_page *profile_pages;

static struct hlist_head *ftrace_profile_hash;
static int ftrace_profile_bits;
static int ftrace_profile_enabled;
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(atomic_t, ftrace_profile_disable);

#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be a power of 2 */

static raw_spinlock_t ftrace_profile_rec_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	rec++;
	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	return function_stat_next(&profile_pages_start->records[0], 0);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph: compare against the hit counter */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif
static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "  Function                               Hit    Time\n"
		      "  --------                               ---    ----\n");
#else
	seq_printf(m, "  Function                               Hit\n"
		      "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_printf(m, "    ");
	trace_print_seq(m, &s);
	mutex_unlock(&mutex);
#endif
	seq_putc(m, '\n');

	return 0;
}

static struct tracer_stat function_stats = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};
static void ftrace_profile_reset(void)
{
	struct ftrace_profile_page *pg;

	pg = profile_pages = profile_pages_start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(ftrace_profile_hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(void)
{
	struct ftrace_profile_page *pg;
	int i;

	/* If we already allocated, do nothing */
	if (profile_pages)
		return 0;

	profile_pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!profile_pages)
		return -ENOMEM;

	pg = profile_pages_start = profile_pages;

	/* allocate 10 more pages to start */
	for (i = 0; i < 10; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		/*
		 * We only care about allocating profile_pages; if
		 * we failed to allocate here, hopefully we will allocate
		 * later.
		 */
		if (!pg->next)
			break;
		pg = pg->next;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int size;

	if (ftrace_profile_hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset();
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	ftrace_profile_hash =
		kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!ftrace_profile_hash)
		return -ENOMEM;

	size--;
	for (; size; size >>= 1)
		ftrace_profile_bits++;

	/* Preallocate a few pages */
	if (ftrace_profile_pages_init() < 0) {
		kfree(ftrace_profile_hash);
		ftrace_profile_hash = NULL;
		return -ENOMEM;
	}

	return 0;
}
/* interrupts must be disabled */
static struct ftrace_profile *ftrace_find_profiled_func(unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;

	key = hash_long(ip, ftrace_profile_bits);
	hhd = &ftrace_profile_hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu(rec, n, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, ftrace_profile_bits);
	hlist_add_head_rcu(&rec->node, &ftrace_profile_hash[key]);
}
/* Interrupts must be disabled calling this */
static struct ftrace_profile *
ftrace_profile_alloc(unsigned long ip, bool alloc_safe)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion */
	if (atomic_inc_return(&__get_cpu_var(ftrace_profile_disable)) != 1)
		goto out;

	__raw_spin_lock(&ftrace_profile_rec_lock);

	/* Try to always keep another page available */
	if (!profile_pages->next && alloc_safe)
		profile_pages->next = (void *)get_zeroed_page(GFP_ATOMIC);

	/*
	 * Try to find the function again since another
	 * task on another CPU could have added it
	 */
	rec = ftrace_find_profiled_func(ip);
	if (rec)
		goto out_unlock;

	if (profile_pages->index == PROFILES_PER_PAGE) {
		if (!profile_pages->next)
			goto out_unlock;
		profile_pages = profile_pages->next;
	}

	rec = &profile_pages->records[profile_pages->index++];
	rec->ip = ip;
	ftrace_add_profile(rec);

 out_unlock:
	__raw_spin_unlock(&ftrace_profile_rec_lock);
 out:
	atomic_dec(&__get_cpu_var(ftrace_profile_disable));

	return rec;
}

/*
 * If we are not in an interrupt or softirq, and interrupts are
 * disabled and preemption is not enabled (not in a spinlock), then
 * it should be safe to allocate memory.
 */
static bool ftrace_safe_to_allocate(void)
{
	return !in_interrupt() && irqs_disabled() && !preempt_count();
}
static void
function_profile_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_profile *rec;
	unsigned long flags;
	bool alloc_safe;

	if (!ftrace_profile_enabled)
		return;

	alloc_safe = ftrace_safe_to_allocate();

	local_irq_save(flags);
	rec = ftrace_find_profiled_func(ip);
	if (!rec) {
		rec = ftrace_profile_alloc(ip, alloc_safe);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	rec = ftrace_find_profiled_func(trace->func);
	if (rec)
		rec->time += trace->rettime - trace->calltime;
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly =
{
	.func		= function_profile_call,
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtoul(buf, 10, &val);
	if (ret < 0)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	filp->f_pos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
};
static void ftrace_profile_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;
	int ret;

	ret = register_stat_tracer(&function_stats);
	if (ret) {
		pr_warning("Warning: could not register "
			   "function stats\n");
		return;
	}

	entry = debugfs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static void ftrace_profile_debugfs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */
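
/*
 * Usage sketch (illustrative, not from the original source): the file
 * created above toggles the profiler from user space, e.g.
 *
 *	echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
 *
 * after which per-function hit counts are reported through the stat
 * tracer registered above under the name "functions".
 */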
/* set when tracing only a pid */
struct pid *ftrace_pid_trace;
static struct pid * const ftrace_swapper_pid = &init_struct_pid;
#ifdef CONFIG_DYNAMIC_FTRACE

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct rcu_head		rcu;
};

enum {
	FTRACE_ENABLE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_ENABLE_MCOUNT		= (1 << 3),
	FTRACE_DISABLE_MCOUNT		= (1 << 4),
	FTRACE_START_FUNC_RET		= (1 << 5),
	FTRACE_STOP_FUNC_RET		= (1 << 6),
};
static int ftrace_filtered;

static struct dyn_ftrace *ftrace_new_addrs;

static DEFINE_MUTEX(ftrace_regex_lock);

struct ftrace_page {
	struct ftrace_page	*next;
	int			index;
	struct dyn_ftrace	records[];
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
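
/*
 * Sizing note (illustrative, not from the original source): with 4K
 * pages and a dyn_ftrace of a few machine words, ENTRIES_PER_PAGE
 * lands in the low hundreds, so the NR_TO_INIT estimate below maps to
 * several dozen pages for a typical vmlinux.
 */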
/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static struct dyn_ftrace *ftrace_free_records;

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}
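
/*
 * Usage sketch (illustrative, not from the original source):
 *
 *	struct ftrace_page *pg;
 *	struct dyn_ftrace *rec;
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->flags & FTRACE_FL_FREE)
 *			continue;
 *		...
 *	} while_for_each_ftrace_rec();
 *
 * 'continue' advances the inner loop; leaving both loops early needs
 * a goto, as the comment above warns.
 */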
#ifdef CONFIG_KPROBES

static int frozen_record_count;

static inline void freeze_record(struct dyn_ftrace *rec)
{
	if (!(rec->flags & FTRACE_FL_FROZEN)) {
		rec->flags |= FTRACE_FL_FROZEN;
		frozen_record_count++;
	}
}

static inline void unfreeze_record(struct dyn_ftrace *rec)
{
	if (rec->flags & FTRACE_FL_FROZEN) {
		rec->flags &= ~FTRACE_FL_FROZEN;
		frozen_record_count--;
	}
}

static inline int record_frozen(struct dyn_ftrace *rec)
{
	return rec->flags & FTRACE_FL_FROZEN;
}
#else
# define freeze_record(rec)			({ 0; })
# define unfreeze_record(rec)			({ 0; })
# define record_frozen(rec)			({ 0; })
#endif /* CONFIG_KPROBES */
static void ftrace_free_rec(struct dyn_ftrace *rec)
{
	rec->freelist = ftrace_free_records;
	ftrace_free_records = rec;
	rec->flags |= FTRACE_FL_FREE;
}

void ftrace_release(void *start, unsigned long size)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	if (ftrace_disabled || !start)
		return;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {
		if ((rec->ip >= s) && (rec->ip < e) &&
		    !(rec->flags & FTRACE_FL_FREE))
			ftrace_free_rec(rec);
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			FTRACE_WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			return NULL;
		}

		ftrace_free_records = rec->freelist;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next) {
			/* allocate another page */
			ftrace_pages->next =
				(void *)get_zeroed_page(GFP_KERNEL);
			if (!ftrace_pages->next)
				return NULL;
		}
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}

static struct dyn_ftrace *
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *rec;

	if (ftrace_disabled)
		return NULL;

	rec = ftrace_alloc_dyn_node(ip);
	if (!rec)
		return NULL;

	rec->ip = ip;
	rec->newlist = ftrace_new_addrs;
	ftrace_new_addrs = rec;

	return rec;
}
static void print_ip_ins(const char *fmt, unsigned char *p)
{
	int i;

	printk(KERN_CONT "%s", fmt);

	for (i = 0; i < MCOUNT_INSN_SIZE; i++)
		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}

static void ftrace_bug(int failed, unsigned long ip)
{
	switch (failed) {
	case -EFAULT:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on modifying ");
		print_ip_sym(ip);
		break;
	case -EINVAL:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace failed to modify ");
		print_ip_sym(ip);
		print_ip_ins(" actual: ", (unsigned char *)ip);
		printk(KERN_CONT "\n");
		break;
	case -EPERM:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on writing ");
		print_ip_sym(ip);
		break;
	default:
		FTRACE_WARN_ON_ONCE(1);
		pr_info("ftrace faulted on unknown error ");
		print_ip_sym(ip);
	}
}
static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
	unsigned long ip, fl;

	ftrace_addr = (unsigned long)FTRACE_ADDR;

	ip = rec->ip;

	/*
	 * If this record is not to be traced and
	 * it is not enabled then do nothing.
	 *
	 * If this record is not to be traced and
	 * it is enabled then disable it.
	 */
	if (rec->flags & FTRACE_FL_NOTRACE) {
		if (rec->flags & FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
			return 0;

	} else if (ftrace_filtered && enable) {
		/*
		 * Filtering is on:
		 */

		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		/* Record is filtered and enabled, do nothing */
		if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
			return 0;

		/* Record is not filtered or enabled, do nothing */
		if (!fl)
			return 0;

		/* Record is not filtered but enabled, disable it */
		if (fl == FTRACE_FL_ENABLED)
			rec->flags &= ~FTRACE_FL_ENABLED;
		else
		/* Otherwise record is filtered but not enabled, enable it */
			rec->flags |= FTRACE_FL_ENABLED;
	} else {
		/* Disable or not filtered */

		if (enable) {
			/* if record is enabled, do nothing */
			if (rec->flags & FTRACE_FL_ENABLED)
				return 0;

			rec->flags |= FTRACE_FL_ENABLED;

		} else {
			/* if record is not enabled, do nothing */
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return 0;

			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	if (rec->flags & FTRACE_FL_ENABLED)
		return ftrace_make_call(rec, ftrace_addr);
	else
		return ftrace_make_nop(NULL, rec, ftrace_addr);
}
static void ftrace_replace_code(int enable)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int failed;

	do_for_each_ftrace_rec(pg, rec) {
		/*
		 * Skip over free records and records that have
		 * failed or are not converted.
		 */
		if (rec->flags & FTRACE_FL_FREE ||
		    rec->flags & FTRACE_FL_FAILED ||
		    !(rec->flags & FTRACE_FL_CONVERTED))
			continue;

		/* ignore updates to this record's mcount site */
		if (get_kprobe((void *)rec->ip)) {
			freeze_record(rec);
			continue;
		} else {
			unfreeze_record(rec);
		}

		failed = __ftrace_replace_code(rec, enable);
		if (failed) {
			rec->flags |= FTRACE_FL_FAILED;
			if ((system_state == SYSTEM_BOOTING) ||
			    !core_kernel_text(rec->ip)) {
				ftrace_free_rec(rec);
			} else {
				ftrace_bug(failed, rec->ip);
				/* Stop processing */
				return;
			}
		}
	} while_for_each_ftrace_rec();
}
static int
ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
{
	unsigned long ip;
	int ret;

	ip = rec->ip;

	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
	if (ret) {
		ftrace_bug(ret, ip);
		rec->flags |= FTRACE_FL_FAILED;
		return 0;
	}
	return 1;
}

/*
 * archs can override this function if they must do something
 * before the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_prepare(void)
{
	return 0;
}

/*
 * archs can override this function if they must do something
 * after the modifying code is performed.
 */
int __weak ftrace_arch_code_modify_post_process(void)
{
	return 0;
}
static int __ftrace_modify_code(void *data)
{
	int *command = data;

	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	if (*command & FTRACE_START_FUNC_RET)
		ftrace_enable_ftrace_graph_caller();
	else if (*command & FTRACE_STOP_FUNC_RET)
		ftrace_disable_ftrace_graph_caller();

	return 0;
}

static void ftrace_run_update_code(int command)
{
	int ret;

	ret = ftrace_arch_code_modify_prepare();
	FTRACE_WARN_ON(ret);
	if (ret)
		return;

	stop_machine(__ftrace_modify_code, &command, NULL);

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
}
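
/*
 * Editorial note (not in the original source): stop_machine() runs
 * __ftrace_modify_code() with all other CPUs quiesced, so no CPU
 * executes an mcount call site while its instruction bytes are being
 * rewritten.
 */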
static ftrace_func_t saved_ftrace_func;
static int ftrace_start_up;

static void ftrace_startup_enable(int command)
{
	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}

static void ftrace_startup(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up++;
	command |= FTRACE_ENABLE_CALLS;

	ftrace_startup_enable(command);
}

static void ftrace_shutdown(int command)
{
	if (unlikely(ftrace_disabled))
		return;

	ftrace_start_up--;
	if (!ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		return;

	ftrace_run_update_code(command);
}
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
}

static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
}
static cycle_t		ftrace_update_time;
static unsigned long	ftrace_update_cnt;
unsigned long		ftrace_update_tot_cnt;

static int ftrace_update_code(struct module *mod)
{
	struct dyn_ftrace *p;
	cycle_t start, stop;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	while (ftrace_new_addrs) {

		/* If something went wrong, bail without enabling anything */
		if (unlikely(ftrace_disabled))
			return -1;

		p = ftrace_new_addrs;
		ftrace_new_addrs = p->newlist;

		/* convert record (i.e, patch mcount-call with NOP) */
		if (ftrace_code_disable(mod, p)) {
			p->flags |= FTRACE_FL_CONVERTED;
			ftrace_update_cnt++;
		} else
			ftrace_free_rec(p);
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	return 0;
}
static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = num_to_init / ENTRIES_PER_PAGE;
	pr_info("ftrace: allocating %ld entries in %d pages\n",
		num_to_init, cnt + 1);

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_CONT	= (1 << 1),
	FTRACE_ITER_NOTRACE	= (1 << 2),
	FTRACE_ITER_FAILURES	= (1 << 3),
	FTRACE_ITER_PRINTALL	= (1 << 4),
	FTRACE_ITER_HASH	= (1 << 5),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	struct ftrace_page	*pg;
	int			hidx;
	int			idx;
	unsigned		flags;
	unsigned char		buffer[FTRACE_BUFF_MAX+1];
	unsigned		buffer_idx;
	unsigned		filtered;
};
static void *
t_hash_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct hlist_node *hnd = v;
	struct hlist_head *hhd;

	WARN_ON(!(iter->flags & FTRACE_ITER_HASH));

	(*pos)++;

 retry:
	if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
		return NULL;

	hhd = &ftrace_func_hash[iter->hidx];

	if (hlist_empty(hhd)) {
		iter->hidx++;
		hnd = NULL;
		goto retry;
	}

	if (!hnd)
		hnd = hhd->first;
	else {
		hnd = hnd->next;
		if (!hnd) {
			iter->hidx++;
			goto retry;
		}
	}

	return hnd;
}

static void *t_hash_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	iter->flags |= FTRACE_ITER_HASH;

	return t_hash_next(m, p, pos);
}
static int t_hash_show(struct seq_file *m, void *v)
{
	struct ftrace_func_probe *rec;
	struct hlist_node *hnd = v;
	char str[KSYM_SYMBOL_LEN];

	rec = hlist_entry(hnd, struct ftrace_func_probe, node);

	if (rec->ops->print)
		return rec->ops->print(m, rec->ip, rec->ops, rec->data);

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str);
	seq_printf(m, "%s", str);

	if (rec->data)
		seq_printf(m, ":%p", rec->data);
	seq_putc(m, '\n');

	return 0;
}
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_next(m, v, pos);

	(*pos)++;

	if (iter->flags & FTRACE_ITER_PRINTALL)
		return NULL;

 retry:
	if (iter->idx >= iter->pg->index) {
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FREE) ||

		    (!(iter->flags & FTRACE_ITER_FAILURES) &&
		     (rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FAILURES) &&
		     !(rec->flags & FTRACE_FL_FAILED)) ||

		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||

		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	return rec;
}
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;

	mutex_lock(&ftrace_lock);
	/*
	 * For set_ftrace_filter reading, if we have the filter
	 * off, we can short cut and just print out that all
	 * functions are enabled.
	 */
	if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
		if (*pos > 0)
			return t_hash_start(m, pos);
		iter->flags |= FTRACE_ITER_PRINTALL;
		(*pos)++;
		return iter;
	}

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_start(m, pos);

	p = t_next(m, p, pos);

	if (!p)
		return t_hash_start(m, pos);

	return p;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&ftrace_lock);
}
static int t_show(struct seq_file *m, void *v)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (iter->flags & FTRACE_ITER_HASH)
		return t_hash_show(m, v);

	if (iter->flags & FTRACE_ITER_PRINTALL) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter = m->private;

	seq_release(inode, file);
	kfree(iter);

	return 0;
}

static int
ftrace_failures_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *m;
	struct ftrace_iterator *iter;

	ret = ftrace_avail_open(inode, file);
	if (!ret) {
		m = (struct seq_file *)file->private_data;
		iter = (struct ftrace_iterator *)m->private;
		iter->flags = FTRACE_ITER_FAILURES;
	}

	return ret;
}
static void ftrace_filter_reset(int enable)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	mutex_lock(&ftrace_lock);
	if (enable)
		ftrace_filtered = 0;
	do_for_each_ftrace_rec(pg, rec) {
		if (rec->flags & FTRACE_FL_FAILED)
			continue;
		rec->flags &= ~type;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else {
			kfree(iter);
		}
	} else
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};

/*
 * (static function - no need for kernel doc)
 *
 * Pass in a buffer containing a glob and this function will
 * set search to point to the search part of the buffer and
 * return the type of search it is (see enum above).
 * This does modify buff.
 *
 * Returns enum type.
 *  search returns the pointer to use for comparison.
 *  not returns 1 if buff started with a '!'
 *     0 otherwise.
 */
static int
ftrace_setup_glob(char *buff, int len, char **search, int *not)
{
	int type = MATCH_FULL;
	int i;

	if (buff[0] == '!') {
		*not = 1;
		buff++;
		len--;
	} else
		*not = 0;

	*search = buff;

	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				*search = buff + 1;
				type = MATCH_END_ONLY;
			} else {
				if (type == MATCH_END_ONLY)
					type = MATCH_MIDDLE_ONLY;
				else
					type = MATCH_FRONT_ONLY;
				buff[i] = 0;
				break;
			}
		}
	}

	return type;
}
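
/*
 * Examples (illustrative, not from the original source):
 *	"sched_switch"  -> MATCH_FULL,        search = "sched_switch"
 *	"sched*"        -> MATCH_FRONT_ONLY,  search = "sched"
 *	"*_lock"        -> MATCH_END_ONLY,    search = "_lock"
 *	"*spin*"        -> MATCH_MIDDLE_ONLY, search = "spin"
 *	"!sched*"       -> as above, with *not set to 1
 */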
static int ftrace_match(char *str, char *regex, int len, int type)
{
	int matched = 0;
	char *ptr;

	switch (type) {
	case MATCH_FULL:
		if (strcmp(str, regex) == 0)
			matched = 1;
		break;
	case MATCH_FRONT_ONLY:
		if (strncmp(str, regex, len) == 0)
			matched = 1;
		break;
	case MATCH_MIDDLE_ONLY:
		if (strstr(str, regex))
			matched = 1;
		break;
	case MATCH_END_ONLY:
		ptr = strstr(str, regex);
		if (ptr && (ptr[len] == 0))
			matched = 1;
		break;
	}

	return matched;
}

static int
ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	return ftrace_match(str, regex, len, type);
}
static void ftrace_match_records(char *buff, int len, int enable)
{
	unsigned int search_len;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	unsigned long flag;
	char *search;
	int type;
	int not;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	type = ftrace_setup_glob(buff, len, &search, &not);

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		/*
		 * Only enable filtering if we have a function that
		 * is filtered on.
		 */
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;
	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
static int
ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
			   char *regex, int len, int type)
{
	char str[KSYM_SYMBOL_LEN];
	char *modname;

	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);

	if (!modname || strcmp(modname, mod))
		return 0;

	/* blank search means to match all funcs in the mod */
	if (len)
		return ftrace_match(str, regex, len, type);
	else
		return 1;
}

static void ftrace_match_module_records(char *buff, char *mod, int enable)
{
	unsigned search_len = 0;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	char *search = buff;
	unsigned long flag;
	int not = 0;

	flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;

	/* blank or '*' mean the same */
	if (strcmp(buff, "*") == 0)
		buff[0] = 0;

	/* handle the case of 'dont filter this module' */
	if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
		buff[0] = 0;
		not = 1;
	}

	if (strlen(buff)) {
		type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
		search_len = strlen(search);
	}

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (ftrace_match_module_record(rec, mod,
					       search, search_len, type)) {
			if (not)
				rec->flags &= ~flag;
			else
				rec->flags |= flag;
		}
		if (enable && (rec->flags & FTRACE_FL_FILTER))
			ftrace_filtered = 1;

	} while_for_each_ftrace_rec();
	mutex_unlock(&ftrace_lock);
}
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */

static int
ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
{
	char *mod;

	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
	if (!param)
		return -EINVAL;

	mod = strsep(&param, ":");
	if (!strlen(mod))
		return -EINVAL;

	ftrace_match_module_records(func, mod, enable);
	return 0;
}

static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}

device_initcall(ftrace_mod_cmd_init);
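
/*
 * Usage sketch (illustrative, not from the original source): with the
 * "mod" command registered, writing a glob of the form
 *
 *	echo '*:mod:ext3' > /sys/kernel/debug/tracing/set_ftrace_filter
 *
 * routes through ftrace_process_regex() below to this callback and
 * filters on every traced function belonging to the ext3 module.
 */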
static void
function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_func_probe *entry;
	struct hlist_head *hhd;
	struct hlist_node *n;
	unsigned long key;
	int resched;

	key = hash_long(ip, FTRACE_HASH_BITS);

	hhd = &ftrace_func_hash[key];

	if (hlist_empty(hhd))
		return;

	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
	resched = ftrace_preempt_disable();
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		if (entry->ip == ip)
			entry->ops->func(ip, parent_ip, &entry->data);
	}
	ftrace_preempt_enable(resched);
}

static struct ftrace_ops trace_probe_ops __read_mostly =
{
	.func		= function_trace_probe_call,
};

static int ftrace_probe_registered;
static void __enable_ftrace_function_probe(void)
{
	int i;

	if (ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			break;
	}
	/* Nothing registered? */
	if (i == FTRACE_FUNC_HASHSIZE)
		return;

	__register_ftrace_function(&trace_probe_ops);
	ftrace_startup(0);
	ftrace_probe_registered = 1;
}

static void __disable_ftrace_function_probe(void)
{
	int i;

	if (!ftrace_probe_registered)
		return;

	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];
		if (hhd->first)
			return;
	}

	/* no more funcs left */
	__unregister_ftrace_function(&trace_probe_ops);
	ftrace_shutdown(0);
	ftrace_probe_registered = 0;
}

static void ftrace_free_entry_rcu(struct rcu_head *rhp)
{
	struct ftrace_func_probe *entry =
		container_of(rhp, struct ftrace_func_probe, rcu);

	if (entry->ops->free)
		entry->ops->free(&entry->data);
	kfree(entry);
}
int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data)
{
	struct ftrace_func_probe *entry;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type, len, not;
	unsigned long key;
	int count = 0;
	char *search;

	type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
	len = strlen(search);

	/* we do not support '!' for function probes */
	if (WARN_ON(not))
		return -EINVAL;

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (rec->flags & FTRACE_FL_FAILED)
			continue;

		if (!ftrace_match_record(rec, search, len, type))
			continue;

		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			/* If we did not process any, then return error */
			if (!count)
				count = -ENOMEM;
			goto out_unlock;
		}

		count++;

		entry->data = data;

		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
		if (ops->callback) {
			if (ops->callback(rec->ip, &entry->data) < 0) {
				/* caller does not like this func */
				kfree(entry);
				continue;
			}
		}

		entry->ops = ops;
		entry->ip = rec->ip;

		key = hash_long(entry->ip, FTRACE_HASH_BITS);
		hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);

	} while_for_each_ftrace_rec();
	__enable_ftrace_function_probe();

 out_unlock:
	mutex_unlock(&ftrace_lock);

	return count;
}
enum {
	PROBE_TEST_FUNC		= 1,
	PROBE_TEST_DATA		= 2
};

static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				   void *data, int flags)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n, *tmp;
	char str[KSYM_SYMBOL_LEN];
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;

	if (glob && (strcmp(glob, "*") || !strlen(glob)))
		glob = NULL;
	else {
		int not;

		type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
		len = strlen(search);

		/* we do not support '!' for function probes */
		if (WARN_ON(not))
			return;
	}

	mutex_lock(&ftrace_lock);
	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
		struct hlist_head *hhd = &ftrace_func_hash[i];

		hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {

			/* break up if statements for readability */
			if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
				continue;

			if ((flags & PROBE_TEST_DATA) && entry->data != data)
				continue;

			/* do this last, since it is the most expensive */
			if (glob) {
				kallsyms_lookup(entry->ip, NULL, NULL,
						NULL, str);
				if (!ftrace_match(str, glob, len, type))
					continue;
			}

			hlist_del(&entry->node);
			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
		}
	}
	__disable_ftrace_function_probe();
	mutex_unlock(&ftrace_lock);
}

void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data)
{
	__unregister_ftrace_function_probe(glob, ops, data,
					   PROBE_TEST_FUNC | PROBE_TEST_DATA);
}

void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
{
	__unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
}

void unregister_ftrace_function_probe_all(char *glob)
{
	__unregister_ftrace_function_probe(glob, NULL, NULL, 0);
}
static LIST_HEAD(ftrace_commands);
static DEFINE_MUTEX(ftrace_cmd_mutex);

int register_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p;
	int ret = 0;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = -EBUSY;
			goto out_unlock;
		}
	}
	list_add(&cmd->list, &ftrace_commands);
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}

int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
	struct ftrace_func_command *p, *n;
	int ret = -ENODEV;

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry_safe(p, n, &ftrace_commands, list) {
		if (strcmp(cmd->name, p->name) == 0) {
			ret = 0;
			list_del_init(&p->list);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static int ftrace_process_regex(char *buff, int len, int enable)
{
	char *func, *command, *next = buff;
	struct ftrace_func_command *p;
	int ret = -EINVAL;

	func = strsep(&next, ":");

	if (!next) {
		ftrace_match_records(func, len, enable);
		return 0;
	}

	/* command found */

	command = strsep(&next, ":");

	mutex_lock(&ftrace_cmd_mutex);
	list_for_each_entry(p, &ftrace_commands, list) {
		if (strcmp(p->name, command) == 0) {
			ret = p->func(func, command, next, enable);
			goto out_unlock;
		}
	}
 out_unlock:
	mutex_unlock(&ftrace_cmd_mutex);

	return ret;
}
static ssize_t
ftrace_regex_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos, int enable)
{
	struct ftrace_iterator *iter;
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&ftrace_regex_lock);

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		iter = m->private;
	} else
		iter = file->private_data;

	if (!*ppos) {
		iter->flags &= ~FTRACE_ITER_CONT;
		iter->buffer_idx = 0;
	}

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		if (isspace(ch)) {
			file->f_pos += read;
			ret = read;
			goto out;
		}

		iter->buffer_idx = 0;
	}

	while (cnt && !isspace(ch)) {
		if (iter->buffer_idx < FTRACE_BUFF_MAX)
			iter->buffer[iter->buffer_idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ret = ftrace_process_regex(iter->buffer,
					   iter->buffer_idx, enable);
		if (ret)
			goto out;
		iter->buffer_idx = 0;
	} else
		iter->flags |= FTRACE_ITER_CONT;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match_records(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non-zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
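
/*
 * Usage sketch (illustrative, not from the original source): a tracer
 * can restrict itself to scheduler functions before enabling tracing:
 *
 *	char buf[] = "sched_*";
 *
 *	ftrace_set_filter(buf, strlen(buf), 1);
 *
 * Note the buffer must be writable: ftrace_setup_glob() truncates the
 * glob in place at the '*'.
 */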
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_lock);
	if (ftrace_start_up && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftrace_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}

static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
static const struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_failures_fops = {
	.open = ftrace_failures_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};

static const struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};

static const struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = seq_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static DEFINE_MUTEX(graph_lock);

int ftrace_graph_count;
unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;

static void *
g_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned long *array = m->private;
	int index = *pos;

	(*pos)++;

	if (index >= ftrace_graph_count)
		return NULL;

	return &array[index];
}

static void *g_start(struct seq_file *m, loff_t *pos)
{
	void *p = NULL;

	mutex_lock(&graph_lock);

	/* Nothing, tell g_show to print all functions are enabled */
	if (!ftrace_graph_count && !*pos)
		return (void *)1;

	p = g_next(m, p, pos);

	return p;
}

static void g_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&graph_lock);
}
static int g_show(struct seq_file *m, void *v)
{
	unsigned long *ptr = v;
	char str[KSYM_SYMBOL_LEN];

	if (!ptr)
		return 0;

	if (ptr == (unsigned long *)1) {
		seq_printf(m, "#### all functions enabled ####\n");
		return 0;
	}

	kallsyms_lookup(*ptr, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

static struct seq_operations ftrace_graph_seq_ops = {
	.start = g_start,
	.next = g_next,
	.stop = g_stop,
	.show = g_show,
};

static int
ftrace_graph_open(struct inode *inode, struct file *file)
{
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&graph_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND)) {
		ftrace_graph_count = 0;
		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
	}

	if (file->f_mode & FMODE_READ) {
		ret = seq_open(file, &ftrace_graph_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = ftrace_graph_funcs;
		}
	} else
		file->private_data = ftrace_graph_funcs;
	mutex_unlock(&graph_lock);

	return ret;
}
static int
ftrace_set_func(unsigned long *array, int *idx, char *buffer)
{
	struct dyn_ftrace *rec;
	struct ftrace_page *pg;
	int search_len;
	int found = 0;
	int type, not;
	char *search;
	bool exists;
	int i;

	if (ftrace_disabled)
		return -ENODEV;

	/* decode regex */
	type = ftrace_setup_glob(buffer, strlen(buffer), &search, &not);
	if (not)
		return -EINVAL;

	search_len = strlen(search);

	mutex_lock(&ftrace_lock);
	do_for_each_ftrace_rec(pg, rec) {

		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
			break;

		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
			continue;

		if (ftrace_match_record(rec, search, search_len, type)) {
			/* ensure it is not already in the array */
			exists = false;
			for (i = 0; i < *idx; i++)
				if (array[i] == rec->ip) {
					exists = true;
					break;
				}
			if (!exists) {
				array[(*idx)++] = rec->ip;
				found = 1;
			}
		}
	} while_for_each_ftrace_rec();

	mutex_unlock(&ftrace_lock);

	return found ? 0 : -EINVAL;
}
static ssize_t
ftrace_graph_write(struct file *file, const char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	unsigned char buffer[FTRACE_BUFF_MAX+1];
	unsigned long *array;
	size_t read = 0;
	ssize_t ret;
	int index = 0;
	char ch;

	if (!cnt || cnt < 0)
		return 0;

	mutex_lock(&graph_lock);

	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
		ret = -EBUSY;
		goto out;
	}

	if (file->f_mode & FMODE_READ) {
		struct seq_file *m = file->private_data;
		array = m->private;
	} else
		array = file->private_data;

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;
	read++;
	cnt--;

	/* skip white space */
	while (cnt && isspace(ch)) {
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	if (isspace(ch)) {
		ret = read;
		goto out;
	}

	while (cnt && !isspace(ch)) {
		if (index < FTRACE_BUFF_MAX)
			buffer[index++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}
	buffer[index] = 0;

	/* we allow only one expression at a time */
	ret = ftrace_set_func(array, &ftrace_graph_count, buffer);
	if (ret)
		goto out;

	file->f_pos += read;

	ret = read;
 out:
	mutex_unlock(&graph_lock);

	return ret;
}

static const struct file_operations ftrace_graph_fops = {
	.open = ftrace_graph_open,
	.read = seq_read,
	.write = ftrace_graph_write,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
{
	struct dentry *entry;

	entry = debugfs_create_file("available_filter_functions", 0444,
				    d_tracer, NULL, &ftrace_avail_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'available_filter_functions' entry\n");

	entry = debugfs_create_file("failures", 0444,
				    d_tracer, NULL, &ftrace_failures_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'failures' entry\n");

	entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
				    NULL, &ftrace_filter_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_filter' entry\n");

	entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
				    NULL, &ftrace_notrace_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_notrace' entry\n");

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
				    NULL,
				    &ftrace_graph_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_graph_function' entry\n");
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

	return 0;
}
static int ftrace_convert_nops(struct module *mod,
			       unsigned long *start,
			       unsigned long *end)
{
	unsigned long *p;
	unsigned long addr;
	unsigned long flags;

	mutex_lock(&ftrace_lock);
	p = start;
	while (p < end) {
		addr = ftrace_call_adjust(*p++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
		if (!addr)
			continue;
		ftrace_record_ip(addr);
	}

	/* disable interrupts to prevent kstop machine */
	local_irq_save(flags);
	ftrace_update_code(mod);
	local_irq_restore(flags);
	mutex_unlock(&ftrace_lock);

	return 0;
}

void ftrace_init_module(struct module *mod,
			unsigned long *start, unsigned long *end)
{
	if (ftrace_disabled || start == end)
		return;
	ftrace_convert_nops(mod, start, end);
}
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

void __init ftrace_init(void)
{
	unsigned long count, addr, flags;
	int ret;

	/* Keep the ftrace pointer to the stub */
	addr = (unsigned long)ftrace_stub;

	local_irq_save(flags);
	ftrace_dyn_arch_init(&addr);
	local_irq_restore(flags);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr)
		goto failed;

	count = __stop_mcount_loc - __start_mcount_loc;

	ret = ftrace_dyn_table_alloc(count);
	if (ret)
		goto failed;

	last_ftrace_enabled = ftrace_enabled = 1;

	ret = ftrace_convert_nops(NULL,
				  __start_mcount_loc,
				  __stop_mcount_loc);

	return;
 failed:
	ftrace_disabled = 1;
}

#else /* CONFIG_DYNAMIC_FTRACE */

static int __init ftrace_nodyn_init(void)
{
	ftrace_enabled = 1;
	return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)	do { } while (0)
# define ftrace_shutdown(command)	do { } while (0)
# define ftrace_startup_sysctl()	do { } while (0)
# define ftrace_shutdown_sysctl()	do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
static ssize_t
ftrace_pid_read(struct file *file, char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	if (ftrace_pid_trace == ftrace_swapper_pid)
		r = sprintf(buf, "swapper tasks\n");
	else if (ftrace_pid_trace)
		r = sprintf(buf, "%u\n", pid_vnr(ftrace_pid_trace));
	else
		r = sprintf(buf, "no pid\n");

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static void clear_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		clear_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void set_ftrace_swapper(void)
{
	struct task_struct *p;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		p = idle_task(cpu);
		set_tsk_trace_trace(p);
	}
	put_online_cpus();
}

static void clear_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		clear_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();

	put_pid(pid);
}

static void set_ftrace_pid(struct pid *pid)
{
	struct task_struct *p;

	rcu_read_lock();
	do_each_pid_task(pid, PIDTYPE_PID, p) {
		set_tsk_trace_trace(p);
	} while_each_pid_task(pid, PIDTYPE_PID, p);
	rcu_read_unlock();
}

static void clear_ftrace_pid_task(struct pid **pid)
{
	if (*pid == ftrace_swapper_pid)
		clear_ftrace_swapper();
	else
		clear_ftrace_pid(*pid);

	*pid = NULL;
}

static void set_ftrace_pid_task(struct pid *pid)
{
	if (pid == ftrace_swapper_pid)
		set_ftrace_swapper();
	else
		set_ftrace_pid(pid);
}
static ssize_t
ftrace_pid_write(struct file *filp, const char __user *ubuf,
		 size_t cnt, loff_t *ppos)
{
	struct pid *pid;
	char buf[64];
	long val;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = strict_strtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	mutex_lock(&ftrace_lock);
	if (val < 0) {
		/* disable pid tracing */
		if (!ftrace_pid_trace)
			goto out;

		clear_ftrace_pid_task(&ftrace_pid_trace);

	} else {
		/* swapper task is special */
		if (!val) {
			pid = ftrace_swapper_pid;
			if (pid == ftrace_pid_trace)
				goto out;
		} else {
			pid = find_get_pid(val);

			if (pid == ftrace_pid_trace) {
				put_pid(pid);
				goto out;
			}
		}

		if (ftrace_pid_trace)
			clear_ftrace_pid_task(&ftrace_pid_trace);

		if (!pid)
			goto out;

		ftrace_pid_trace = pid;

		set_ftrace_pid_task(ftrace_pid_trace);
	}

	/* update the function call */
	ftrace_update_pid_func();
	ftrace_startup_enable(0);

 out:
	mutex_unlock(&ftrace_lock);

	return cnt;
}
static const struct file_operations ftrace_pid_fops = {
	.read = ftrace_pid_read,
	.write = ftrace_pid_write,
};

static __init int ftrace_init_debugfs(void)
{
	struct dentry *d_tracer;
	struct dentry *entry;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	ftrace_init_dyn_debugfs(d_tracer);

	entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
				    NULL, &ftrace_pid_fops);
	if (!entry)
		pr_warning("Could not create debugfs "
			   "'set_ftrace_pid' entry\n");

	ftrace_profile_debugfs(d_tracer);

	return 0;
}
fs_initcall(ftrace_init_debugfs);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
void ftrace_kill(void)
{
	ftrace_disabled = 1;
	ftrace_enabled = 0;
	clear_ftrace_function();
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -1;

	mutex_lock(&ftrace_lock);

	ret = __register_ftrace_function(ops);
	ftrace_startup(0);

	mutex_unlock(&ftrace_lock);
	return ret;
}

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = __unregister_ftrace_function(ops);
	ftrace_shutdown(0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
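
/*
 * Caller sketch (illustrative, not from the original source):
 *
 *	static void notrace my_trace_func(unsigned long ip,
 *					  unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_trace_func,
 *	};
 *
 *	register_ftrace_function(&my_ops);
 *
 * As the kernel-doc above warns, the callback and everything it calls
 * must be notrace, or the callback would trace itself recursively.
 */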
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			t->curr_ret_stack = -1;
			/* Make sure IRQs see the -1 first: */
			barrier();
			t->ret_stack = ret_stack_list[start++];
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
		}
	} while_each_thread(g, t);

 unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
 free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}
static void
ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev,
				struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu)
		ftrace_graph_init_task(idle_task(cpu));

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}
/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}
,
3155 trace_func_graph_ent_t entryfunc
)
3159 mutex_lock(&ftrace_lock
);
3161 /* we currently allow only one tracer registered at a time */
3162 if (atomic_read(&ftrace_graph_active
)) {
3167 ftrace_suspend_notifier
.notifier_call
= ftrace_suspend_notifier_call
;
3168 register_pm_notifier(&ftrace_suspend_notifier
);
3170 atomic_inc(&ftrace_graph_active
);
3171 ret
= start_graph_tracing();
3173 atomic_dec(&ftrace_graph_active
);
3177 ftrace_graph_return
= retfunc
;
3178 ftrace_graph_entry
= entryfunc
;
3180 ftrace_startup(FTRACE_START_FUNC_RET
);
3183 mutex_unlock(&ftrace_lock
);
3187 void unregister_ftrace_graph(void)
3189 mutex_lock(&ftrace_lock
);
3191 atomic_dec(&ftrace_graph_active
);
3192 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch
);
3193 ftrace_graph_return
= (trace_func_graph_ret_t
)ftrace_stub
;
3194 ftrace_graph_entry
= ftrace_graph_entry_stub
;
3195 ftrace_shutdown(FTRACE_STOP_FUNC_RET
);
3196 unregister_pm_notifier(&ftrace_suspend_notifier
);
3198 mutex_unlock(&ftrace_lock
);
3201 /* Allocate a return stack for newly created task */
3202 void ftrace_graph_init_task(struct task_struct
*t
)
3204 if (atomic_read(&ftrace_graph_active
)) {
3205 t
->ret_stack
= kmalloc(FTRACE_RETFUNC_DEPTH
3206 * sizeof(struct ftrace_ret_stack
),
3210 t
->curr_ret_stack
= -1;
3211 atomic_set(&t
->tracing_graph_pause
, 0);
3212 atomic_set(&t
->trace_overrun
, 0);
3213 t
->ftrace_timestamp
= 0;
3215 t
->ret_stack
= NULL
;
3218 void ftrace_graph_exit_task(struct task_struct
*t
)
3220 struct ftrace_ret_stack
*ret_stack
= t
->ret_stack
;
3222 t
->ret_stack
= NULL
;
3223 /* NULL must become visible to IRQs before we free it: */
3229 void ftrace_graph_stop(void)