2 * Infrastructure for profiling code inserted by 'gcc -pg'.
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
10 * Based on code in the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/bsearch.h>
26 #include <linux/module.h>
27 #include <linux/ftrace.h>
28 #include <linux/sysctl.h>
29 #include <linux/slab.h>
30 #include <linux/ctype.h>
31 #include <linux/sort.h>
32 #include <linux/list.h>
33 #include <linux/hash.h>
34 #include <linux/rcupdate.h>
36 #include <trace/events/sched.h>
38 #include <asm/setup.h>
40 #include "trace_output.h"
41 #include "trace_stat.h"
43 #define FTRACE_WARN_ON(cond) \
51 #define FTRACE_WARN_ON_ONCE(cond) \
54 if (WARN_ON_ONCE(___r)) \
59 /* hash bits for specific function selection */
60 #define FTRACE_HASH_BITS 7
61 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
62 #define FTRACE_HASH_DEFAULT_BITS 10
63 #define FTRACE_HASH_MAX_BITS 12
65 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
67 #ifdef CONFIG_DYNAMIC_FTRACE
68 #define INIT_OPS_HASH(opsname) \
69 .func_hash = &opsname.local_hash, \
70 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
71 #define ASSIGN_OPS_HASH(opsname, val) \
73 .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
75 #define INIT_OPS_HASH(opsname)
76 #define ASSIGN_OPS_HASH(opsname, val)
79 static struct ftrace_ops ftrace_list_end __read_mostly
= {
81 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_STUB
,
82 INIT_OPS_HASH(ftrace_list_end
)
85 /* ftrace_enabled is a method to turn ftrace on or off */
86 int ftrace_enabled __read_mostly
;
87 static int last_ftrace_enabled
;
89 /* Current function tracing op */
90 struct ftrace_ops
*function_trace_op __read_mostly
= &ftrace_list_end
;
91 /* What to set function_trace_op to */
92 static struct ftrace_ops
*set_function_trace_op
;
94 /* List for set_ftrace_pid's pids. */
95 LIST_HEAD(ftrace_pids
);
97 struct list_head list
;
102 * ftrace_disabled is set when an anomaly is discovered.
103 * ftrace_disabled is much stronger than ftrace_enabled.
105 static int ftrace_disabled __read_mostly
;
107 static DEFINE_MUTEX(ftrace_lock
);
109 static struct ftrace_ops
*ftrace_control_list __read_mostly
= &ftrace_list_end
;
110 static struct ftrace_ops
*ftrace_ops_list __read_mostly
= &ftrace_list_end
;
111 ftrace_func_t ftrace_trace_function __read_mostly
= ftrace_stub
;
112 ftrace_func_t ftrace_pid_function __read_mostly
= ftrace_stub
;
113 static struct ftrace_ops global_ops
;
114 static struct ftrace_ops control_ops
;
116 static void ftrace_ops_recurs_func(unsigned long ip
, unsigned long parent_ip
,
117 struct ftrace_ops
*op
, struct pt_regs
*regs
);
119 #if ARCH_SUPPORTS_FTRACE_OPS
120 static void ftrace_ops_list_func(unsigned long ip
, unsigned long parent_ip
,
121 struct ftrace_ops
*op
, struct pt_regs
*regs
);
123 /* See comment below, where ftrace_ops_list_func is defined */
124 static void ftrace_ops_no_ops(unsigned long ip
, unsigned long parent_ip
);
125 #define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
129 * Traverse the ftrace_global_list, invoking all entries. The reason that we
130 * can use rcu_dereference_raw_notrace() is that elements removed from this list
131 * are simply leaked, so there is no need to interact with a grace-period
132 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
133 * concurrent insertions into the ftrace_global_list.
135 * Silly Alpha and silly pointer-speculation compiler optimizations!
137 #define do_for_each_ftrace_op(op, list) \
138 op = rcu_dereference_raw_notrace(list); \
142 * Optimized for just a single item in the list (as that is the normal case).
144 #define while_for_each_ftrace_op(op) \
145 while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
146 unlikely((op) != &ftrace_list_end))
148 static inline void ftrace_ops_init(struct ftrace_ops
*ops
)
150 #ifdef CONFIG_DYNAMIC_FTRACE
151 if (!(ops
->flags
& FTRACE_OPS_FL_INITIALIZED
)) {
152 mutex_init(&ops
->local_hash
.regex_lock
);
153 ops
->func_hash
= &ops
->local_hash
;
154 ops
->flags
|= FTRACE_OPS_FL_INITIALIZED
;
160 * ftrace_nr_registered_ops - return number of ops registered
162 * Returns the number of ftrace_ops registered and tracing functions
164 int ftrace_nr_registered_ops(void)
166 struct ftrace_ops
*ops
;
169 mutex_lock(&ftrace_lock
);
171 for (ops
= ftrace_ops_list
;
172 ops
!= &ftrace_list_end
; ops
= ops
->next
)
175 mutex_unlock(&ftrace_lock
);
180 static void ftrace_pid_func(unsigned long ip
, unsigned long parent_ip
,
181 struct ftrace_ops
*op
, struct pt_regs
*regs
)
183 if (!test_tsk_trace_trace(current
))
186 ftrace_pid_function(ip
, parent_ip
, op
, regs
);
189 static void set_ftrace_pid_function(ftrace_func_t func
)
191 /* do not set ftrace_pid_function to itself! */
192 if (func
!= ftrace_pid_func
)
193 ftrace_pid_function
= func
;
197 * clear_ftrace_function - reset the ftrace function
199 * This NULLs the ftrace function and in essence stops
200 * tracing. There may be lag
202 void clear_ftrace_function(void)
204 ftrace_trace_function
= ftrace_stub
;
205 ftrace_pid_function
= ftrace_stub
;
208 static void control_ops_disable_all(struct ftrace_ops
*ops
)
212 for_each_possible_cpu(cpu
)
213 *per_cpu_ptr(ops
->disabled
, cpu
) = 1;
216 static int control_ops_alloc(struct ftrace_ops
*ops
)
218 int __percpu
*disabled
;
220 disabled
= alloc_percpu(int);
224 ops
->disabled
= disabled
;
225 control_ops_disable_all(ops
);
229 static void ftrace_sync(struct work_struct
*work
)
232 * This function is just a stub to implement a hard force
233 * of synchronize_sched(). This requires synchronizing
234 * tasks even in userspace and idle.
236 * Yes, function tracing is rude.
240 static void ftrace_sync_ipi(void *data
)
242 /* Probably not needed, but do it anyway */
246 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
247 static void update_function_graph_func(void);
249 static inline void update_function_graph_func(void) { }
252 static void update_ftrace_function(void)
257 * Prepare the ftrace_ops that the arch callback will use.
258 * If there's only one ftrace_ops registered, the ftrace_ops_list
259 * will point to the ops we want.
261 set_function_trace_op
= ftrace_ops_list
;
263 /* If there's no ftrace_ops registered, just call the stub function */
264 if (ftrace_ops_list
== &ftrace_list_end
) {
268 * If we are at the end of the list and this ops is
269 * recursion safe and not dynamic and the arch supports passing ops,
270 * then have the mcount trampoline call the function directly.
272 } else if (ftrace_ops_list
->next
== &ftrace_list_end
) {
273 func
= ftrace_ops_get_func(ftrace_ops_list
);
276 /* Just use the default ftrace_ops */
277 set_function_trace_op
= &ftrace_list_end
;
278 func
= ftrace_ops_list_func
;
281 update_function_graph_func();
283 /* If there's no change, then do nothing more here */
284 if (ftrace_trace_function
== func
)
288 * If we are using the list function, it doesn't care
289 * about the function_trace_ops.
291 if (func
== ftrace_ops_list_func
) {
292 ftrace_trace_function
= func
;
294 * Don't even bother setting function_trace_ops,
295 * it would be racy to do so anyway.
300 #ifndef CONFIG_DYNAMIC_FTRACE
302 * For static tracing, we need to be a bit more careful.
303 * The function change takes affect immediately. Thus,
304 * we need to coorditate the setting of the function_trace_ops
305 * with the setting of the ftrace_trace_function.
307 * Set the function to the list ops, which will call the
308 * function we want, albeit indirectly, but it handles the
309 * ftrace_ops and doesn't depend on function_trace_op.
311 ftrace_trace_function
= ftrace_ops_list_func
;
313 * Make sure all CPUs see this. Yes this is slow, but static
314 * tracing is slow and nasty to have enabled.
316 schedule_on_each_cpu(ftrace_sync
);
317 /* Now all cpus are using the list ops. */
318 function_trace_op
= set_function_trace_op
;
319 /* Make sure the function_trace_op is visible on all CPUs */
321 /* Nasty way to force a rmb on all cpus */
322 smp_call_function(ftrace_sync_ipi
, NULL
, 1);
323 /* OK, we are all set to update the ftrace_trace_function now! */
324 #endif /* !CONFIG_DYNAMIC_FTRACE */
326 ftrace_trace_function
= func
;
329 int using_ftrace_ops_list_func(void)
331 return ftrace_trace_function
== ftrace_ops_list_func
;
334 static void add_ftrace_ops(struct ftrace_ops
**list
, struct ftrace_ops
*ops
)
338 * We are entering ops into the list but another
339 * CPU might be walking that list. We need to make sure
340 * the ops->next pointer is valid before another CPU sees
341 * the ops pointer included into the list.
343 rcu_assign_pointer(*list
, ops
);
346 static int remove_ftrace_ops(struct ftrace_ops
**list
, struct ftrace_ops
*ops
)
348 struct ftrace_ops
**p
;
351 * If we are removing the last function, then simply point
352 * to the ftrace_stub.
354 if (*list
== ops
&& ops
->next
== &ftrace_list_end
) {
355 *list
= &ftrace_list_end
;
359 for (p
= list
; *p
!= &ftrace_list_end
; p
= &(*p
)->next
)
370 static void add_ftrace_list_ops(struct ftrace_ops
**list
,
371 struct ftrace_ops
*main_ops
,
372 struct ftrace_ops
*ops
)
374 int first
= *list
== &ftrace_list_end
;
375 add_ftrace_ops(list
, ops
);
377 add_ftrace_ops(&ftrace_ops_list
, main_ops
);
380 static int remove_ftrace_list_ops(struct ftrace_ops
**list
,
381 struct ftrace_ops
*main_ops
,
382 struct ftrace_ops
*ops
)
384 int ret
= remove_ftrace_ops(list
, ops
);
385 if (!ret
&& *list
== &ftrace_list_end
)
386 ret
= remove_ftrace_ops(&ftrace_ops_list
, main_ops
);
390 static void ftrace_update_trampoline(struct ftrace_ops
*ops
);
392 static int __register_ftrace_function(struct ftrace_ops
*ops
)
394 if (ops
->flags
& FTRACE_OPS_FL_DELETED
)
397 if (WARN_ON(ops
->flags
& FTRACE_OPS_FL_ENABLED
))
400 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
402 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
403 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
404 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
406 if (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
&&
407 !(ops
->flags
& FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
))
410 if (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED
)
411 ops
->flags
|= FTRACE_OPS_FL_SAVE_REGS
;
414 if (!core_kernel_data((unsigned long)ops
))
415 ops
->flags
|= FTRACE_OPS_FL_DYNAMIC
;
417 if (ops
->flags
& FTRACE_OPS_FL_CONTROL
) {
418 if (control_ops_alloc(ops
))
420 add_ftrace_list_ops(&ftrace_control_list
, &control_ops
, ops
);
422 add_ftrace_ops(&ftrace_ops_list
, ops
);
424 ftrace_update_trampoline(ops
);
427 update_ftrace_function();
432 static int __unregister_ftrace_function(struct ftrace_ops
*ops
)
436 if (WARN_ON(!(ops
->flags
& FTRACE_OPS_FL_ENABLED
)))
439 if (ops
->flags
& FTRACE_OPS_FL_CONTROL
) {
440 ret
= remove_ftrace_list_ops(&ftrace_control_list
,
443 ret
= remove_ftrace_ops(&ftrace_ops_list
, ops
);
449 update_ftrace_function();
454 static void ftrace_update_pid_func(void)
456 /* Only do something if we are tracing something */
457 if (ftrace_trace_function
== ftrace_stub
)
460 update_ftrace_function();
463 #ifdef CONFIG_FUNCTION_PROFILER
464 struct ftrace_profile
{
465 struct hlist_node node
;
467 unsigned long counter
;
468 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
469 unsigned long long time
;
470 unsigned long long time_squared
;
474 struct ftrace_profile_page
{
475 struct ftrace_profile_page
*next
;
477 struct ftrace_profile records
[];
480 struct ftrace_profile_stat
{
482 struct hlist_head
*hash
;
483 struct ftrace_profile_page
*pages
;
484 struct ftrace_profile_page
*start
;
485 struct tracer_stat stat
;
488 #define PROFILE_RECORDS_SIZE \
489 (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
491 #define PROFILES_PER_PAGE \
492 (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
494 static int ftrace_profile_enabled __read_mostly
;
496 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
497 static DEFINE_MUTEX(ftrace_profile_lock
);
499 static DEFINE_PER_CPU(struct ftrace_profile_stat
, ftrace_profile_stats
);
501 #define FTRACE_PROFILE_HASH_BITS 10
502 #define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
505 function_stat_next(void *v
, int idx
)
507 struct ftrace_profile
*rec
= v
;
508 struct ftrace_profile_page
*pg
;
510 pg
= (struct ftrace_profile_page
*)((unsigned long)rec
& PAGE_MASK
);
516 if ((void *)rec
>= (void *)&pg
->records
[pg
->index
]) {
520 rec
= &pg
->records
[0];
528 static void *function_stat_start(struct tracer_stat
*trace
)
530 struct ftrace_profile_stat
*stat
=
531 container_of(trace
, struct ftrace_profile_stat
, stat
);
533 if (!stat
|| !stat
->start
)
536 return function_stat_next(&stat
->start
->records
[0], 0);
539 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
540 /* function graph compares on total time */
541 static int function_stat_cmp(void *p1
, void *p2
)
543 struct ftrace_profile
*a
= p1
;
544 struct ftrace_profile
*b
= p2
;
546 if (a
->time
< b
->time
)
548 if (a
->time
> b
->time
)
554 /* not function graph compares against hits */
555 static int function_stat_cmp(void *p1
, void *p2
)
557 struct ftrace_profile
*a
= p1
;
558 struct ftrace_profile
*b
= p2
;
560 if (a
->counter
< b
->counter
)
562 if (a
->counter
> b
->counter
)
569 static int function_stat_headers(struct seq_file
*m
)
571 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
572 seq_printf(m
, " Function "
575 "--- ---- --- ---\n");
577 seq_printf(m
, " Function Hit\n"
583 static int function_stat_show(struct seq_file
*m
, void *v
)
585 struct ftrace_profile
*rec
= v
;
586 char str
[KSYM_SYMBOL_LEN
];
588 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
589 static struct trace_seq s
;
590 unsigned long long avg
;
591 unsigned long long stddev
;
593 mutex_lock(&ftrace_profile_lock
);
595 /* we raced with function_profile_reset() */
596 if (unlikely(rec
->counter
== 0)) {
601 kallsyms_lookup(rec
->ip
, NULL
, NULL
, NULL
, str
);
602 seq_printf(m
, " %-30.30s %10lu", str
, rec
->counter
);
604 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
607 do_div(avg
, rec
->counter
);
609 /* Sample standard deviation (s^2) */
610 if (rec
->counter
<= 1)
614 * Apply Welford's method:
615 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
617 stddev
= rec
->counter
* rec
->time_squared
-
618 rec
->time
* rec
->time
;
621 * Divide only 1000 for ns^2 -> us^2 conversion.
622 * trace_print_graph_duration will divide 1000 again.
624 do_div(stddev
, rec
->counter
* (rec
->counter
- 1) * 1000);
628 trace_print_graph_duration(rec
->time
, &s
);
629 trace_seq_puts(&s
, " ");
630 trace_print_graph_duration(avg
, &s
);
631 trace_seq_puts(&s
, " ");
632 trace_print_graph_duration(stddev
, &s
);
633 trace_print_seq(m
, &s
);
637 mutex_unlock(&ftrace_profile_lock
);
642 static void ftrace_profile_reset(struct ftrace_profile_stat
*stat
)
644 struct ftrace_profile_page
*pg
;
646 pg
= stat
->pages
= stat
->start
;
649 memset(pg
->records
, 0, PROFILE_RECORDS_SIZE
);
654 memset(stat
->hash
, 0,
655 FTRACE_PROFILE_HASH_SIZE
* sizeof(struct hlist_head
));
658 int ftrace_profile_pages_init(struct ftrace_profile_stat
*stat
)
660 struct ftrace_profile_page
*pg
;
665 /* If we already allocated, do nothing */
669 stat
->pages
= (void *)get_zeroed_page(GFP_KERNEL
);
673 #ifdef CONFIG_DYNAMIC_FTRACE
674 functions
= ftrace_update_tot_cnt
;
677 * We do not know the number of functions that exist because
678 * dynamic tracing is what counts them. With past experience
679 * we have around 20K functions. That should be more than enough.
680 * It is highly unlikely we will execute every function in
686 pg
= stat
->start
= stat
->pages
;
688 pages
= DIV_ROUND_UP(functions
, PROFILES_PER_PAGE
);
690 for (i
= 1; i
< pages
; i
++) {
691 pg
->next
= (void *)get_zeroed_page(GFP_KERNEL
);
702 unsigned long tmp
= (unsigned long)pg
;
714 static int ftrace_profile_init_cpu(int cpu
)
716 struct ftrace_profile_stat
*stat
;
719 stat
= &per_cpu(ftrace_profile_stats
, cpu
);
722 /* If the profile is already created, simply reset it */
723 ftrace_profile_reset(stat
);
728 * We are profiling all functions, but usually only a few thousand
729 * functions are hit. We'll make a hash of 1024 items.
731 size
= FTRACE_PROFILE_HASH_SIZE
;
733 stat
->hash
= kzalloc(sizeof(struct hlist_head
) * size
, GFP_KERNEL
);
738 /* Preallocate the function profiling pages */
739 if (ftrace_profile_pages_init(stat
) < 0) {
748 static int ftrace_profile_init(void)
753 for_each_possible_cpu(cpu
) {
754 ret
= ftrace_profile_init_cpu(cpu
);
762 /* interrupts must be disabled */
763 static struct ftrace_profile
*
764 ftrace_find_profiled_func(struct ftrace_profile_stat
*stat
, unsigned long ip
)
766 struct ftrace_profile
*rec
;
767 struct hlist_head
*hhd
;
770 key
= hash_long(ip
, FTRACE_PROFILE_HASH_BITS
);
771 hhd
= &stat
->hash
[key
];
773 if (hlist_empty(hhd
))
776 hlist_for_each_entry_rcu_notrace(rec
, hhd
, node
) {
784 static void ftrace_add_profile(struct ftrace_profile_stat
*stat
,
785 struct ftrace_profile
*rec
)
789 key
= hash_long(rec
->ip
, FTRACE_PROFILE_HASH_BITS
);
790 hlist_add_head_rcu(&rec
->node
, &stat
->hash
[key
]);
794 * The memory is already allocated, this simply finds a new record to use.
796 static struct ftrace_profile
*
797 ftrace_profile_alloc(struct ftrace_profile_stat
*stat
, unsigned long ip
)
799 struct ftrace_profile
*rec
= NULL
;
801 /* prevent recursion (from NMIs) */
802 if (atomic_inc_return(&stat
->disabled
) != 1)
806 * Try to find the function again since an NMI
807 * could have added it
809 rec
= ftrace_find_profiled_func(stat
, ip
);
813 if (stat
->pages
->index
== PROFILES_PER_PAGE
) {
814 if (!stat
->pages
->next
)
816 stat
->pages
= stat
->pages
->next
;
819 rec
= &stat
->pages
->records
[stat
->pages
->index
++];
821 ftrace_add_profile(stat
, rec
);
824 atomic_dec(&stat
->disabled
);
830 function_profile_call(unsigned long ip
, unsigned long parent_ip
,
831 struct ftrace_ops
*ops
, struct pt_regs
*regs
)
833 struct ftrace_profile_stat
*stat
;
834 struct ftrace_profile
*rec
;
837 if (!ftrace_profile_enabled
)
840 local_irq_save(flags
);
842 stat
= this_cpu_ptr(&ftrace_profile_stats
);
843 if (!stat
->hash
|| !ftrace_profile_enabled
)
846 rec
= ftrace_find_profiled_func(stat
, ip
);
848 rec
= ftrace_profile_alloc(stat
, ip
);
855 local_irq_restore(flags
);
858 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
859 static int profile_graph_entry(struct ftrace_graph_ent
*trace
)
861 function_profile_call(trace
->func
, 0, NULL
, NULL
);
865 static void profile_graph_return(struct ftrace_graph_ret
*trace
)
867 struct ftrace_profile_stat
*stat
;
868 unsigned long long calltime
;
869 struct ftrace_profile
*rec
;
872 local_irq_save(flags
);
873 stat
= this_cpu_ptr(&ftrace_profile_stats
);
874 if (!stat
->hash
|| !ftrace_profile_enabled
)
877 /* If the calltime was zero'd ignore it */
878 if (!trace
->calltime
)
881 calltime
= trace
->rettime
- trace
->calltime
;
883 if (!(trace_flags
& TRACE_ITER_GRAPH_TIME
)) {
886 index
= trace
->depth
;
888 /* Append this call time to the parent time to subtract */
890 current
->ret_stack
[index
- 1].subtime
+= calltime
;
892 if (current
->ret_stack
[index
].subtime
< calltime
)
893 calltime
-= current
->ret_stack
[index
].subtime
;
898 rec
= ftrace_find_profiled_func(stat
, trace
->func
);
900 rec
->time
+= calltime
;
901 rec
->time_squared
+= calltime
* calltime
;
905 local_irq_restore(flags
);
908 static int register_ftrace_profiler(void)
910 return register_ftrace_graph(&profile_graph_return
,
911 &profile_graph_entry
);
914 static void unregister_ftrace_profiler(void)
916 unregister_ftrace_graph();
919 static struct ftrace_ops ftrace_profile_ops __read_mostly
= {
920 .func
= function_profile_call
,
921 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_INITIALIZED
,
922 INIT_OPS_HASH(ftrace_profile_ops
)
925 static int register_ftrace_profiler(void)
927 return register_ftrace_function(&ftrace_profile_ops
);
930 static void unregister_ftrace_profiler(void)
932 unregister_ftrace_function(&ftrace_profile_ops
);
934 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
937 ftrace_profile_write(struct file
*filp
, const char __user
*ubuf
,
938 size_t cnt
, loff_t
*ppos
)
943 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
949 mutex_lock(&ftrace_profile_lock
);
950 if (ftrace_profile_enabled
^ val
) {
952 ret
= ftrace_profile_init();
958 ret
= register_ftrace_profiler();
963 ftrace_profile_enabled
= 1;
965 ftrace_profile_enabled
= 0;
967 * unregister_ftrace_profiler calls stop_machine
968 * so this acts like an synchronize_sched.
970 unregister_ftrace_profiler();
974 mutex_unlock(&ftrace_profile_lock
);
982 ftrace_profile_read(struct file
*filp
, char __user
*ubuf
,
983 size_t cnt
, loff_t
*ppos
)
985 char buf
[64]; /* big enough to hold a number */
988 r
= sprintf(buf
, "%u\n", ftrace_profile_enabled
);
989 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
992 static const struct file_operations ftrace_profile_fops
= {
993 .open
= tracing_open_generic
,
994 .read
= ftrace_profile_read
,
995 .write
= ftrace_profile_write
,
996 .llseek
= default_llseek
,
999 /* used to initialize the real stat files */
1000 static struct tracer_stat function_stats __initdata
= {
1001 .name
= "functions",
1002 .stat_start
= function_stat_start
,
1003 .stat_next
= function_stat_next
,
1004 .stat_cmp
= function_stat_cmp
,
1005 .stat_headers
= function_stat_headers
,
1006 .stat_show
= function_stat_show
1009 static __init
void ftrace_profile_debugfs(struct dentry
*d_tracer
)
1011 struct ftrace_profile_stat
*stat
;
1012 struct dentry
*entry
;
1017 for_each_possible_cpu(cpu
) {
1018 stat
= &per_cpu(ftrace_profile_stats
, cpu
);
1020 /* allocate enough for function name + cpu number */
1021 name
= kmalloc(32, GFP_KERNEL
);
1024 * The files created are permanent, if something happens
1025 * we still do not free memory.
1028 "Could not allocate stat file for cpu %d\n",
1032 stat
->stat
= function_stats
;
1033 snprintf(name
, 32, "function%d", cpu
);
1034 stat
->stat
.name
= name
;
1035 ret
= register_stat_tracer(&stat
->stat
);
1038 "Could not register function stat for cpu %d\n",
1045 entry
= debugfs_create_file("function_profile_enabled", 0644,
1046 d_tracer
, NULL
, &ftrace_profile_fops
);
1048 pr_warning("Could not create debugfs "
1049 "'function_profile_enabled' entry\n");
1052 #else /* CONFIG_FUNCTION_PROFILER */
1053 static __init
void ftrace_profile_debugfs(struct dentry
*d_tracer
)
1056 #endif /* CONFIG_FUNCTION_PROFILER */
1058 static struct pid
* const ftrace_swapper_pid
= &init_struct_pid
;
1060 #ifdef CONFIG_DYNAMIC_FTRACE
1062 static struct ftrace_ops
*removed_ops
;
1065 * Set when doing a global update, like enabling all recs or disabling them.
1066 * It is not set when just updating a single ftrace_ops.
1068 static bool update_all_ops
;
1070 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
1071 # error Dynamic ftrace depends on MCOUNT_RECORD
1074 static struct hlist_head ftrace_func_hash
[FTRACE_FUNC_HASHSIZE
] __read_mostly
;
1076 struct ftrace_func_probe
{
1077 struct hlist_node node
;
1078 struct ftrace_probe_ops
*ops
;
1079 unsigned long flags
;
1082 struct list_head free_list
;
1085 struct ftrace_func_entry
{
1086 struct hlist_node hlist
;
1090 struct ftrace_hash
{
1091 unsigned long size_bits
;
1092 struct hlist_head
*buckets
;
1093 unsigned long count
;
1094 struct rcu_head rcu
;
1098 * We make these constant because no one should touch them,
1099 * but they are used as the default "empty hash", to avoid allocating
1100 * it all the time. These are in a read only section such that if
1101 * anyone does try to modify it, it will cause an exception.
1103 static const struct hlist_head empty_buckets
[1];
1104 static const struct ftrace_hash empty_hash
= {
1105 .buckets
= (struct hlist_head
*)empty_buckets
,
1107 #define EMPTY_HASH ((struct ftrace_hash *)&empty_hash)
1109 static struct ftrace_ops global_ops
= {
1110 .func
= ftrace_stub
,
1111 .local_hash
.notrace_hash
= EMPTY_HASH
,
1112 .local_hash
.filter_hash
= EMPTY_HASH
,
1113 INIT_OPS_HASH(global_ops
)
1114 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
|
1115 FTRACE_OPS_FL_INITIALIZED
,
1118 struct ftrace_page
{
1119 struct ftrace_page
*next
;
1120 struct dyn_ftrace
*records
;
1125 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
1126 #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)
1128 /* estimate from running different kernels */
1129 #define NR_TO_INIT 10000
1131 static struct ftrace_page
*ftrace_pages_start
;
1132 static struct ftrace_page
*ftrace_pages
;
1134 static bool __always_inline
ftrace_hash_empty(struct ftrace_hash
*hash
)
1136 return !hash
|| !hash
->count
;
1139 static struct ftrace_func_entry
*
1140 ftrace_lookup_ip(struct ftrace_hash
*hash
, unsigned long ip
)
1143 struct ftrace_func_entry
*entry
;
1144 struct hlist_head
*hhd
;
1146 if (ftrace_hash_empty(hash
))
1149 if (hash
->size_bits
> 0)
1150 key
= hash_long(ip
, hash
->size_bits
);
1154 hhd
= &hash
->buckets
[key
];
1156 hlist_for_each_entry_rcu_notrace(entry
, hhd
, hlist
) {
1157 if (entry
->ip
== ip
)
1163 static void __add_hash_entry(struct ftrace_hash
*hash
,
1164 struct ftrace_func_entry
*entry
)
1166 struct hlist_head
*hhd
;
1169 if (hash
->size_bits
)
1170 key
= hash_long(entry
->ip
, hash
->size_bits
);
1174 hhd
= &hash
->buckets
[key
];
1175 hlist_add_head(&entry
->hlist
, hhd
);
1179 static int add_hash_entry(struct ftrace_hash
*hash
, unsigned long ip
)
1181 struct ftrace_func_entry
*entry
;
1183 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
1188 __add_hash_entry(hash
, entry
);
1194 free_hash_entry(struct ftrace_hash
*hash
,
1195 struct ftrace_func_entry
*entry
)
1197 hlist_del(&entry
->hlist
);
1203 remove_hash_entry(struct ftrace_hash
*hash
,
1204 struct ftrace_func_entry
*entry
)
1206 hlist_del(&entry
->hlist
);
1210 static void ftrace_hash_clear(struct ftrace_hash
*hash
)
1212 struct hlist_head
*hhd
;
1213 struct hlist_node
*tn
;
1214 struct ftrace_func_entry
*entry
;
1215 int size
= 1 << hash
->size_bits
;
1221 for (i
= 0; i
< size
; i
++) {
1222 hhd
= &hash
->buckets
[i
];
1223 hlist_for_each_entry_safe(entry
, tn
, hhd
, hlist
)
1224 free_hash_entry(hash
, entry
);
1226 FTRACE_WARN_ON(hash
->count
);
1229 static void free_ftrace_hash(struct ftrace_hash
*hash
)
1231 if (!hash
|| hash
== EMPTY_HASH
)
1233 ftrace_hash_clear(hash
);
1234 kfree(hash
->buckets
);
1238 static void __free_ftrace_hash_rcu(struct rcu_head
*rcu
)
1240 struct ftrace_hash
*hash
;
1242 hash
= container_of(rcu
, struct ftrace_hash
, rcu
);
1243 free_ftrace_hash(hash
);
1246 static void free_ftrace_hash_rcu(struct ftrace_hash
*hash
)
1248 if (!hash
|| hash
== EMPTY_HASH
)
1250 call_rcu_sched(&hash
->rcu
, __free_ftrace_hash_rcu
);
1253 void ftrace_free_filter(struct ftrace_ops
*ops
)
1255 ftrace_ops_init(ops
);
1256 free_ftrace_hash(ops
->func_hash
->filter_hash
);
1257 free_ftrace_hash(ops
->func_hash
->notrace_hash
);
1260 static struct ftrace_hash
*alloc_ftrace_hash(int size_bits
)
1262 struct ftrace_hash
*hash
;
1265 hash
= kzalloc(sizeof(*hash
), GFP_KERNEL
);
1269 size
= 1 << size_bits
;
1270 hash
->buckets
= kcalloc(size
, sizeof(*hash
->buckets
), GFP_KERNEL
);
1272 if (!hash
->buckets
) {
1277 hash
->size_bits
= size_bits
;
1282 static struct ftrace_hash
*
1283 alloc_and_copy_ftrace_hash(int size_bits
, struct ftrace_hash
*hash
)
1285 struct ftrace_func_entry
*entry
;
1286 struct ftrace_hash
*new_hash
;
1291 new_hash
= alloc_ftrace_hash(size_bits
);
1296 if (ftrace_hash_empty(hash
))
1299 size
= 1 << hash
->size_bits
;
1300 for (i
= 0; i
< size
; i
++) {
1301 hlist_for_each_entry(entry
, &hash
->buckets
[i
], hlist
) {
1302 ret
= add_hash_entry(new_hash
, entry
->ip
);
1308 FTRACE_WARN_ON(new_hash
->count
!= hash
->count
);
1313 free_ftrace_hash(new_hash
);
1318 ftrace_hash_rec_disable_modify(struct ftrace_ops
*ops
, int filter_hash
);
1320 ftrace_hash_rec_enable_modify(struct ftrace_ops
*ops
, int filter_hash
);
1323 ftrace_hash_move(struct ftrace_ops
*ops
, int enable
,
1324 struct ftrace_hash
**dst
, struct ftrace_hash
*src
)
1326 struct ftrace_func_entry
*entry
;
1327 struct hlist_node
*tn
;
1328 struct hlist_head
*hhd
;
1329 struct ftrace_hash
*new_hash
;
1330 int size
= src
->count
;
1335 * If the new source is empty, just free dst and assign it
1339 new_hash
= EMPTY_HASH
;
1344 * Make the hash size about 1/2 the # found
1346 for (size
/= 2; size
; size
>>= 1)
1349 /* Don't allocate too much */
1350 if (bits
> FTRACE_HASH_MAX_BITS
)
1351 bits
= FTRACE_HASH_MAX_BITS
;
1353 new_hash
= alloc_ftrace_hash(bits
);
1357 size
= 1 << src
->size_bits
;
1358 for (i
= 0; i
< size
; i
++) {
1359 hhd
= &src
->buckets
[i
];
1360 hlist_for_each_entry_safe(entry
, tn
, hhd
, hlist
) {
1361 remove_hash_entry(src
, entry
);
1362 __add_hash_entry(new_hash
, entry
);
1368 * Remove the current set, update the hash and add
1371 ftrace_hash_rec_disable_modify(ops
, enable
);
1373 rcu_assign_pointer(*dst
, new_hash
);
1375 ftrace_hash_rec_enable_modify(ops
, enable
);
1380 static bool hash_contains_ip(unsigned long ip
,
1381 struct ftrace_ops_hash
*hash
)
1384 * The function record is a match if it exists in the filter
1385 * hash and not in the notrace hash. Note, an emty hash is
1386 * considered a match for the filter hash, but an empty
1387 * notrace hash is considered not in the notrace hash.
1389 return (ftrace_hash_empty(hash
->filter_hash
) ||
1390 ftrace_lookup_ip(hash
->filter_hash
, ip
)) &&
1391 (ftrace_hash_empty(hash
->notrace_hash
) ||
1392 !ftrace_lookup_ip(hash
->notrace_hash
, ip
));
1396 * Test the hashes for this ops to see if we want to call
1397 * the ops->func or not.
1399 * It's a match if the ip is in the ops->filter_hash or
1400 * the filter_hash does not exist or is empty,
1402 * the ip is not in the ops->notrace_hash.
1404 * This needs to be called with preemption disabled as
1405 * the hashes are freed with call_rcu_sched().
1408 ftrace_ops_test(struct ftrace_ops
*ops
, unsigned long ip
, void *regs
)
1410 struct ftrace_ops_hash hash
;
1413 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
1415 * There's a small race when adding ops that the ftrace handler
1416 * that wants regs, may be called without them. We can not
1417 * allow that handler to be called if regs is NULL.
1419 if (regs
== NULL
&& (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
))
1423 hash
.filter_hash
= rcu_dereference_raw_notrace(ops
->func_hash
->filter_hash
);
1424 hash
.notrace_hash
= rcu_dereference_raw_notrace(ops
->func_hash
->notrace_hash
);
1426 if (hash_contains_ip(ip
, &hash
))
1435 * This is a double for. Do not use 'break' to break out of the loop,
1436 * you must use a goto.
1438 #define do_for_each_ftrace_rec(pg, rec) \
1439 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
1441 for (_____i = 0; _____i < pg->index; _____i++) { \
1442 rec = &pg->records[_____i];
1444 #define while_for_each_ftrace_rec() \
1449 static int ftrace_cmp_recs(const void *a
, const void *b
)
1451 const struct dyn_ftrace
*key
= a
;
1452 const struct dyn_ftrace
*rec
= b
;
1454 if (key
->flags
< rec
->ip
)
1456 if (key
->ip
>= rec
->ip
+ MCOUNT_INSN_SIZE
)
1461 static unsigned long ftrace_location_range(unsigned long start
, unsigned long end
)
1463 struct ftrace_page
*pg
;
1464 struct dyn_ftrace
*rec
;
1465 struct dyn_ftrace key
;
1468 key
.flags
= end
; /* overload flags, as it is unsigned long */
1470 for (pg
= ftrace_pages_start
; pg
; pg
= pg
->next
) {
1471 if (end
< pg
->records
[0].ip
||
1472 start
>= (pg
->records
[pg
->index
- 1].ip
+ MCOUNT_INSN_SIZE
))
1474 rec
= bsearch(&key
, pg
->records
, pg
->index
,
1475 sizeof(struct dyn_ftrace
),
1485 * ftrace_location - return true if the ip giving is a traced location
1486 * @ip: the instruction pointer to check
1488 * Returns rec->ip if @ip given is a pointer to a ftrace location.
1489 * That is, the instruction that is either a NOP or call to
1490 * the function tracer. It checks the ftrace internal tables to
1491 * determine if the address belongs or not.
1493 unsigned long ftrace_location(unsigned long ip
)
1495 return ftrace_location_range(ip
, ip
);
1499 * ftrace_text_reserved - return true if range contains an ftrace location
1500 * @start: start of range to search
1501 * @end: end of range to search (inclusive). @end points to the last byte to check.
1503 * Returns 1 if @start and @end contains a ftrace location.
1504 * That is, the instruction that is either a NOP or call to
1505 * the function tracer. It checks the ftrace internal tables to
1506 * determine if the address belongs or not.
1508 int ftrace_text_reserved(const void *start
, const void *end
)
1512 ret
= ftrace_location_range((unsigned long)start
,
1513 (unsigned long)end
);
1518 /* Test if ops registered to this rec needs regs */
1519 static bool test_rec_ops_needs_regs(struct dyn_ftrace
*rec
)
1521 struct ftrace_ops
*ops
;
1522 bool keep_regs
= false;
1524 for (ops
= ftrace_ops_list
;
1525 ops
!= &ftrace_list_end
; ops
= ops
->next
) {
1526 /* pass rec in as regs to have non-NULL val */
1527 if (ftrace_ops_test(ops
, rec
->ip
, rec
)) {
1528 if (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
) {
1538 static void __ftrace_hash_rec_update(struct ftrace_ops
*ops
,
1542 struct ftrace_hash
*hash
;
1543 struct ftrace_hash
*other_hash
;
1544 struct ftrace_page
*pg
;
1545 struct dyn_ftrace
*rec
;
1549 /* Only update if the ops has been registered */
1550 if (!(ops
->flags
& FTRACE_OPS_FL_ENABLED
))
1554 * In the filter_hash case:
1555 * If the count is zero, we update all records.
1556 * Otherwise we just update the items in the hash.
1558 * In the notrace_hash case:
1559 * We enable the update in the hash.
1560 * As disabling notrace means enabling the tracing,
1561 * and enabling notrace means disabling, the inc variable
1565 hash
= ops
->func_hash
->filter_hash
;
1566 other_hash
= ops
->func_hash
->notrace_hash
;
1567 if (ftrace_hash_empty(hash
))
1571 hash
= ops
->func_hash
->notrace_hash
;
1572 other_hash
= ops
->func_hash
->filter_hash
;
1574 * If the notrace hash has no items,
1575 * then there's nothing to do.
1577 if (ftrace_hash_empty(hash
))
1581 do_for_each_ftrace_rec(pg
, rec
) {
1582 int in_other_hash
= 0;
1588 * Only the filter_hash affects all records.
1589 * Update if the record is not in the notrace hash.
1591 if (!other_hash
|| !ftrace_lookup_ip(other_hash
, rec
->ip
))
1594 in_hash
= !!ftrace_lookup_ip(hash
, rec
->ip
);
1595 in_other_hash
= !!ftrace_lookup_ip(other_hash
, rec
->ip
);
1598 * If filter_hash is set, we want to match all functions
1599 * that are in the hash but not in the other hash.
1601 * If filter_hash is not set, then we are decrementing.
1602 * That means we match anything that is in the hash
1603 * and also in the other_hash. That is, we need to turn
1604 * off functions in the other hash because they are disabled
1607 if (filter_hash
&& in_hash
&& !in_other_hash
)
1609 else if (!filter_hash
&& in_hash
&&
1610 (in_other_hash
|| ftrace_hash_empty(other_hash
)))
1618 if (FTRACE_WARN_ON(ftrace_rec_count(rec
) == FTRACE_REF_MAX
))
1622 * If there's only a single callback registered to a
1623 * function, and the ops has a trampoline registered
1624 * for it, then we can call it directly.
1626 if (ftrace_rec_count(rec
) == 1 && ops
->trampoline
)
1627 rec
->flags
|= FTRACE_FL_TRAMP
;
1630 * If we are adding another function callback
1631 * to this function, and the previous had a
1632 * custom trampoline in use, then we need to go
1633 * back to the default trampoline.
1635 rec
->flags
&= ~FTRACE_FL_TRAMP
;
1638 * If any ops wants regs saved for this function
1639 * then all ops will get saved regs.
1641 if (ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
)
1642 rec
->flags
|= FTRACE_FL_REGS
;
1644 if (FTRACE_WARN_ON(ftrace_rec_count(rec
) == 0))
1649 * If the rec had REGS enabled and the ops that is
1650 * being removed had REGS set, then see if there is
1651 * still any ops for this record that wants regs.
1652 * If not, we can stop recording them.
1654 if (ftrace_rec_count(rec
) > 0 &&
1655 rec
->flags
& FTRACE_FL_REGS
&&
1656 ops
->flags
& FTRACE_OPS_FL_SAVE_REGS
) {
1657 if (!test_rec_ops_needs_regs(rec
))
1658 rec
->flags
&= ~FTRACE_FL_REGS
;
1662 * If the rec had TRAMP enabled, then it needs to
1663 * be cleared. As TRAMP can only be enabled iff
1664 * there is only a single ops attached to it.
1665 * In otherwords, always disable it on decrementing.
1666 * In the future, we may set it if rec count is
1667 * decremented to one, and the ops that is left
1670 rec
->flags
&= ~FTRACE_FL_TRAMP
;
1673 * flags will be cleared in ftrace_check_record()
1674 * if rec count is zero.
1678 /* Shortcut, if we handled all records, we are done. */
1679 if (!all
&& count
== hash
->count
)
1681 } while_for_each_ftrace_rec();
1684 static void ftrace_hash_rec_disable(struct ftrace_ops
*ops
,
1687 __ftrace_hash_rec_update(ops
, filter_hash
, 0);
1690 static void ftrace_hash_rec_enable(struct ftrace_ops
*ops
,
1693 __ftrace_hash_rec_update(ops
, filter_hash
, 1);
1696 static void ftrace_hash_rec_update_modify(struct ftrace_ops
*ops
,
1697 int filter_hash
, int inc
)
1699 struct ftrace_ops
*op
;
1701 __ftrace_hash_rec_update(ops
, filter_hash
, inc
);
1703 if (ops
->func_hash
!= &global_ops
.local_hash
)
1707 * If the ops shares the global_ops hash, then we need to update
1708 * all ops that are enabled and use this hash.
1710 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
1714 if (op
->func_hash
== &global_ops
.local_hash
)
1715 __ftrace_hash_rec_update(op
, filter_hash
, inc
);
1716 } while_for_each_ftrace_op(op
);
1719 static void ftrace_hash_rec_disable_modify(struct ftrace_ops
*ops
,
1722 ftrace_hash_rec_update_modify(ops
, filter_hash
, 0);
1725 static void ftrace_hash_rec_enable_modify(struct ftrace_ops
*ops
,
1728 ftrace_hash_rec_update_modify(ops
, filter_hash
, 1);
1731 static void print_ip_ins(const char *fmt
, unsigned char *p
)
1735 printk(KERN_CONT
"%s", fmt
);
1737 for (i
= 0; i
< MCOUNT_INSN_SIZE
; i
++)
1738 printk(KERN_CONT
"%s%02x", i
? ":" : "", p
[i
]);
1742 * ftrace_bug - report and shutdown function tracer
1743 * @failed: The failed type (EFAULT, EINVAL, EPERM)
1744 * @ip: The address that failed
1746 * The arch code that enables or disables the function tracing
1747 * can call ftrace_bug() when it has detected a problem in
1748 * modifying the code. @failed should be one of either:
1749 * EFAULT - if the problem happens on reading the @ip address
1750 * EINVAL - if what is read at @ip is not what was expected
1751 * EPERM - if the problem happens on writting to the @ip address
1753 void ftrace_bug(int failed
, unsigned long ip
)
1757 FTRACE_WARN_ON_ONCE(1);
1758 pr_info("ftrace faulted on modifying ");
1762 FTRACE_WARN_ON_ONCE(1);
1763 pr_info("ftrace failed to modify ");
1765 print_ip_ins(" actual: ", (unsigned char *)ip
);
1766 printk(KERN_CONT
"\n");
1769 FTRACE_WARN_ON_ONCE(1);
1770 pr_info("ftrace faulted on writing ");
1774 FTRACE_WARN_ON_ONCE(1);
1775 pr_info("ftrace faulted on unknown error ");
1780 static int ftrace_check_record(struct dyn_ftrace
*rec
, int enable
, int update
)
1782 unsigned long flag
= 0UL;
1785 * If we are updating calls:
1787 * If the record has a ref count, then we need to enable it
1788 * because someone is using it.
1790 * Otherwise we make sure its disabled.
1792 * If we are disabling calls, then disable all records that
1795 if (enable
&& ftrace_rec_count(rec
))
1796 flag
= FTRACE_FL_ENABLED
;
1799 * If enabling and the REGS flag does not match the REGS_EN, or
1800 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
1801 * this record. Set flags to fail the compare against ENABLED.
1804 if (!(rec
->flags
& FTRACE_FL_REGS
) !=
1805 !(rec
->flags
& FTRACE_FL_REGS_EN
))
1806 flag
|= FTRACE_FL_REGS
;
1808 if (!(rec
->flags
& FTRACE_FL_TRAMP
) !=
1809 !(rec
->flags
& FTRACE_FL_TRAMP_EN
))
1810 flag
|= FTRACE_FL_TRAMP
;
1813 /* If the state of this record hasn't changed, then do nothing */
1814 if ((rec
->flags
& FTRACE_FL_ENABLED
) == flag
)
1815 return FTRACE_UPDATE_IGNORE
;
1818 /* Save off if rec is being enabled (for return value) */
1819 flag
^= rec
->flags
& FTRACE_FL_ENABLED
;
1822 rec
->flags
|= FTRACE_FL_ENABLED
;
1823 if (flag
& FTRACE_FL_REGS
) {
1824 if (rec
->flags
& FTRACE_FL_REGS
)
1825 rec
->flags
|= FTRACE_FL_REGS_EN
;
1827 rec
->flags
&= ~FTRACE_FL_REGS_EN
;
1829 if (flag
& FTRACE_FL_TRAMP
) {
1830 if (rec
->flags
& FTRACE_FL_TRAMP
)
1831 rec
->flags
|= FTRACE_FL_TRAMP_EN
;
1833 rec
->flags
&= ~FTRACE_FL_TRAMP_EN
;
1838 * If this record is being updated from a nop, then
1839 * return UPDATE_MAKE_CALL.
1841 * return UPDATE_MODIFY_CALL to tell the caller to convert
1842 * from the save regs, to a non-save regs function or
1843 * vice versa, or from a trampoline call.
1845 if (flag
& FTRACE_FL_ENABLED
)
1846 return FTRACE_UPDATE_MAKE_CALL
;
1848 return FTRACE_UPDATE_MODIFY_CALL
;
1852 /* If there's no more users, clear all flags */
1853 if (!ftrace_rec_count(rec
))
1856 /* Just disable the record (keep REGS state) */
1857 rec
->flags
&= ~FTRACE_FL_ENABLED
;
1860 return FTRACE_UPDATE_MAKE_NOP
;
1864 * ftrace_update_record, set a record that now is tracing or not
1865 * @rec: the record to update
1866 * @enable: set to 1 if the record is tracing, zero to force disable
1868 * The records that represent all functions that can be traced need
1869 * to be updated when tracing has been enabled.
1871 int ftrace_update_record(struct dyn_ftrace
*rec
, int enable
)
1873 return ftrace_check_record(rec
, enable
, 1);
1877 * ftrace_test_record, check if the record has been enabled or not
1878 * @rec: the record to test
1879 * @enable: set to 1 to check if enabled, 0 if it is disabled
1881 * The arch code may need to test if a record is already set to
1882 * tracing to determine how to modify the function code that it
1885 int ftrace_test_record(struct dyn_ftrace
*rec
, int enable
)
1887 return ftrace_check_record(rec
, enable
, 0);
1890 static struct ftrace_ops
*
1891 ftrace_find_tramp_ops_any(struct dyn_ftrace
*rec
)
1893 struct ftrace_ops
*op
;
1894 unsigned long ip
= rec
->ip
;
1896 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
1898 if (!op
->trampoline
)
1901 if (hash_contains_ip(ip
, op
->func_hash
))
1903 } while_for_each_ftrace_op(op
);
1908 static struct ftrace_ops
*
1909 ftrace_find_tramp_ops_curr(struct dyn_ftrace
*rec
)
1911 struct ftrace_ops
*op
;
1912 unsigned long ip
= rec
->ip
;
1915 * Need to check removed ops first.
1916 * If they are being removed, and this rec has a tramp,
1917 * and this rec is in the ops list, then it would be the
1918 * one with the tramp.
1921 if (hash_contains_ip(ip
, &removed_ops
->old_hash
))
1926 * Need to find the current trampoline for a rec.
1927 * Now, a trampoline is only attached to a rec if there
1928 * was a single 'ops' attached to it. But this can be called
1929 * when we are adding another op to the rec or removing the
1930 * current one. Thus, if the op is being added, we can
1931 * ignore it because it hasn't attached itself to the rec
1934 * If an ops is being modified (hooking to different functions)
1935 * then we don't care about the new functions that are being
1936 * added, just the old ones (that are probably being removed).
1938 * If we are adding an ops to a function that already is using
1939 * a trampoline, it needs to be removed (trampolines are only
1940 * for single ops connected), then an ops that is not being
1941 * modified also needs to be checked.
1943 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
1945 if (!op
->trampoline
)
1949 * If the ops is being added, it hasn't gotten to
1950 * the point to be removed from this tree yet.
1952 if (op
->flags
& FTRACE_OPS_FL_ADDING
)
1957 * If the ops is being modified and is in the old
1958 * hash, then it is probably being removed from this
1961 if ((op
->flags
& FTRACE_OPS_FL_MODIFYING
) &&
1962 hash_contains_ip(ip
, &op
->old_hash
))
1965 * If the ops is not being added or modified, and it's
1966 * in its normal filter hash, then this must be the one
1969 if (!(op
->flags
& FTRACE_OPS_FL_MODIFYING
) &&
1970 hash_contains_ip(ip
, op
->func_hash
))
1973 } while_for_each_ftrace_op(op
);
1978 static struct ftrace_ops
*
1979 ftrace_find_tramp_ops_new(struct dyn_ftrace
*rec
)
1981 struct ftrace_ops
*op
;
1982 unsigned long ip
= rec
->ip
;
1984 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
1985 /* pass rec in as regs to have non-NULL val */
1986 if (hash_contains_ip(ip
, op
->func_hash
))
1988 } while_for_each_ftrace_op(op
);
1994 * ftrace_get_addr_new - Get the call address to set to
1995 * @rec: The ftrace record descriptor
1997 * If the record has the FTRACE_FL_REGS set, that means that it
1998 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
1999 * is not not set, then it wants to convert to the normal callback.
2001 * Returns the address of the trampoline to set to
2003 unsigned long ftrace_get_addr_new(struct dyn_ftrace
*rec
)
2005 struct ftrace_ops
*ops
;
2007 /* Trampolines take precedence over regs */
2008 if (rec
->flags
& FTRACE_FL_TRAMP
) {
2009 ops
= ftrace_find_tramp_ops_new(rec
);
2010 if (FTRACE_WARN_ON(!ops
|| !ops
->trampoline
)) {
2011 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2012 (void *)rec
->ip
, (void *)rec
->ip
, rec
->flags
);
2013 /* Ftrace is shutting down, return anything */
2014 return (unsigned long)FTRACE_ADDR
;
2016 return ops
->trampoline
;
2019 if (rec
->flags
& FTRACE_FL_REGS
)
2020 return (unsigned long)FTRACE_REGS_ADDR
;
2022 return (unsigned long)FTRACE_ADDR
;
2026 * ftrace_get_addr_curr - Get the call address that is already there
2027 * @rec: The ftrace record descriptor
2029 * The FTRACE_FL_REGS_EN is set when the record already points to
2030 * a function that saves all the regs. Basically the '_EN' version
2031 * represents the current state of the function.
2033 * Returns the address of the trampoline that is currently being called
2035 unsigned long ftrace_get_addr_curr(struct dyn_ftrace
*rec
)
2037 struct ftrace_ops
*ops
;
2039 /* Trampolines take precedence over regs */
2040 if (rec
->flags
& FTRACE_FL_TRAMP_EN
) {
2041 ops
= ftrace_find_tramp_ops_curr(rec
);
2042 if (FTRACE_WARN_ON(!ops
)) {
2043 pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2044 (void *)rec
->ip
, (void *)rec
->ip
);
2045 /* Ftrace is shutting down, return anything */
2046 return (unsigned long)FTRACE_ADDR
;
2048 return ops
->trampoline
;
2051 if (rec
->flags
& FTRACE_FL_REGS_EN
)
2052 return (unsigned long)FTRACE_REGS_ADDR
;
2054 return (unsigned long)FTRACE_ADDR
;
2058 __ftrace_replace_code(struct dyn_ftrace
*rec
, int enable
)
2060 unsigned long ftrace_old_addr
;
2061 unsigned long ftrace_addr
;
2064 ftrace_addr
= ftrace_get_addr_new(rec
);
2066 /* This needs to be done before we call ftrace_update_record */
2067 ftrace_old_addr
= ftrace_get_addr_curr(rec
);
2069 ret
= ftrace_update_record(rec
, enable
);
2072 case FTRACE_UPDATE_IGNORE
:
2075 case FTRACE_UPDATE_MAKE_CALL
:
2076 return ftrace_make_call(rec
, ftrace_addr
);
2078 case FTRACE_UPDATE_MAKE_NOP
:
2079 return ftrace_make_nop(NULL
, rec
, ftrace_old_addr
);
2081 case FTRACE_UPDATE_MODIFY_CALL
:
2082 return ftrace_modify_call(rec
, ftrace_old_addr
, ftrace_addr
);
2085 return -1; /* unknow ftrace bug */
2088 void __weak
ftrace_replace_code(int enable
)
2090 struct dyn_ftrace
*rec
;
2091 struct ftrace_page
*pg
;
2094 if (unlikely(ftrace_disabled
))
2097 do_for_each_ftrace_rec(pg
, rec
) {
2098 failed
= __ftrace_replace_code(rec
, enable
);
2100 ftrace_bug(failed
, rec
->ip
);
2101 /* Stop processing */
2104 } while_for_each_ftrace_rec();
2107 struct ftrace_rec_iter
{
2108 struct ftrace_page
*pg
;
2113 * ftrace_rec_iter_start, start up iterating over traced functions
2115 * Returns an iterator handle that is used to iterate over all
2116 * the records that represent address locations where functions
2119 * May return NULL if no records are available.
2121 struct ftrace_rec_iter
*ftrace_rec_iter_start(void)
2124 * We only use a single iterator.
2125 * Protected by the ftrace_lock mutex.
2127 static struct ftrace_rec_iter ftrace_rec_iter
;
2128 struct ftrace_rec_iter
*iter
= &ftrace_rec_iter
;
2130 iter
->pg
= ftrace_pages_start
;
2133 /* Could have empty pages */
2134 while (iter
->pg
&& !iter
->pg
->index
)
2135 iter
->pg
= iter
->pg
->next
;
2144 * ftrace_rec_iter_next, get the next record to process.
2145 * @iter: The handle to the iterator.
2147 * Returns the next iterator after the given iterator @iter.
2149 struct ftrace_rec_iter
*ftrace_rec_iter_next(struct ftrace_rec_iter
*iter
)
2153 if (iter
->index
>= iter
->pg
->index
) {
2154 iter
->pg
= iter
->pg
->next
;
2157 /* Could have empty pages */
2158 while (iter
->pg
&& !iter
->pg
->index
)
2159 iter
->pg
= iter
->pg
->next
;
2169 * ftrace_rec_iter_record, get the record at the iterator location
2170 * @iter: The current iterator location
2172 * Returns the record that the current @iter is at.
2174 struct dyn_ftrace
*ftrace_rec_iter_record(struct ftrace_rec_iter
*iter
)
2176 return &iter
->pg
->records
[iter
->index
];
2180 ftrace_code_disable(struct module
*mod
, struct dyn_ftrace
*rec
)
2187 if (unlikely(ftrace_disabled
))
2190 ret
= ftrace_make_nop(mod
, rec
, MCOUNT_ADDR
);
2192 ftrace_bug(ret
, ip
);
2199 * archs can override this function if they must do something
2200 * before the modifying code is performed.
2202 int __weak
ftrace_arch_code_modify_prepare(void)
2208 * archs can override this function if they must do something
2209 * after the modifying code is performed.
2211 int __weak
ftrace_arch_code_modify_post_process(void)
2216 void ftrace_modify_all_code(int command
)
2218 int update
= command
& FTRACE_UPDATE_TRACE_FUNC
;
2222 * If the ftrace_caller calls a ftrace_ops func directly,
2223 * we need to make sure that it only traces functions it
2224 * expects to trace. When doing the switch of functions,
2225 * we need to update to the ftrace_ops_list_func first
2226 * before the transition between old and new calls are set,
2227 * as the ftrace_ops_list_func will check the ops hashes
2228 * to make sure the ops are having the right functions
2232 err
= ftrace_update_ftrace_func(ftrace_ops_list_func
);
2233 if (FTRACE_WARN_ON(err
))
2237 if (command
& FTRACE_UPDATE_CALLS
)
2238 ftrace_replace_code(1);
2239 else if (command
& FTRACE_DISABLE_CALLS
)
2240 ftrace_replace_code(0);
2242 if (update
&& ftrace_trace_function
!= ftrace_ops_list_func
) {
2243 function_trace_op
= set_function_trace_op
;
2245 /* If irqs are disabled, we are in stop machine */
2246 if (!irqs_disabled())
2247 smp_call_function(ftrace_sync_ipi
, NULL
, 1);
2248 err
= ftrace_update_ftrace_func(ftrace_trace_function
);
2249 if (FTRACE_WARN_ON(err
))
2253 if (command
& FTRACE_START_FUNC_RET
)
2254 err
= ftrace_enable_ftrace_graph_caller();
2255 else if (command
& FTRACE_STOP_FUNC_RET
)
2256 err
= ftrace_disable_ftrace_graph_caller();
2257 FTRACE_WARN_ON(err
);
2260 static int __ftrace_modify_code(void *data
)
2262 int *command
= data
;
2264 ftrace_modify_all_code(*command
);
2270 * ftrace_run_stop_machine, go back to the stop machine method
2271 * @command: The command to tell ftrace what to do
2273 * If an arch needs to fall back to the stop machine method, the
2274 * it can call this function.
2276 void ftrace_run_stop_machine(int command
)
2278 stop_machine(__ftrace_modify_code
, &command
, NULL
);
2282 * arch_ftrace_update_code, modify the code to trace or not trace
2283 * @command: The command that needs to be done
2285 * Archs can override this function if it does not need to
2286 * run stop_machine() to modify code.
2288 void __weak
arch_ftrace_update_code(int command
)
2290 ftrace_run_stop_machine(command
);
2293 static void ftrace_run_update_code(int command
)
2297 ret
= ftrace_arch_code_modify_prepare();
2298 FTRACE_WARN_ON(ret
);
2303 * By default we use stop_machine() to modify the code.
2304 * But archs can do what ever they want as long as it
2305 * is safe. The stop_machine() is the safest, but also
2306 * produces the most overhead.
2308 arch_ftrace_update_code(command
);
2310 ret
= ftrace_arch_code_modify_post_process();
2311 FTRACE_WARN_ON(ret
);
2314 static void ftrace_run_modify_code(struct ftrace_ops
*ops
, int command
,
2315 struct ftrace_hash
*old_hash
)
2317 ops
->flags
|= FTRACE_OPS_FL_MODIFYING
;
2318 ops
->old_hash
.filter_hash
= old_hash
;
2319 ftrace_run_update_code(command
);
2320 ops
->old_hash
.filter_hash
= NULL
;
2321 ops
->flags
&= ~FTRACE_OPS_FL_MODIFYING
;
2324 static ftrace_func_t saved_ftrace_func
;
2325 static int ftrace_start_up
;
2327 static void control_ops_free(struct ftrace_ops
*ops
)
2329 free_percpu(ops
->disabled
);
2332 static void ftrace_startup_enable(int command
)
2334 if (saved_ftrace_func
!= ftrace_trace_function
) {
2335 saved_ftrace_func
= ftrace_trace_function
;
2336 command
|= FTRACE_UPDATE_TRACE_FUNC
;
2339 if (!command
|| !ftrace_enabled
)
2342 ftrace_run_update_code(command
);
2345 static void ftrace_startup_all(int command
)
2347 update_all_ops
= true;
2348 ftrace_startup_enable(command
);
2349 update_all_ops
= false;
2352 static int ftrace_startup(struct ftrace_ops
*ops
, int command
)
2356 if (unlikely(ftrace_disabled
))
2359 ret
= __register_ftrace_function(ops
);
2364 command
|= FTRACE_UPDATE_CALLS
;
2367 * Note that ftrace probes uses this to start up
2368 * and modify functions it will probe. But we still
2369 * set the ADDING flag for modification, as probes
2370 * do not have trampolines. If they add them in the
2371 * future, then the probes will need to distinguish
2372 * between adding and updating probes.
2374 ops
->flags
|= FTRACE_OPS_FL_ENABLED
| FTRACE_OPS_FL_ADDING
;
2376 ftrace_hash_rec_enable(ops
, 1);
2378 ftrace_startup_enable(command
);
2380 ops
->flags
&= ~FTRACE_OPS_FL_ADDING
;
2385 static int ftrace_shutdown(struct ftrace_ops
*ops
, int command
)
2389 if (unlikely(ftrace_disabled
))
2392 ret
= __unregister_ftrace_function(ops
);
2398 * Just warn in case of unbalance, no need to kill ftrace, it's not
2399 * critical but the ftrace_call callers may be never nopped again after
2400 * further ftrace uses.
2402 WARN_ON_ONCE(ftrace_start_up
< 0);
2404 ftrace_hash_rec_disable(ops
, 1);
2406 ops
->flags
&= ~FTRACE_OPS_FL_ENABLED
;
2408 command
|= FTRACE_UPDATE_CALLS
;
2410 if (saved_ftrace_func
!= ftrace_trace_function
) {
2411 saved_ftrace_func
= ftrace_trace_function
;
2412 command
|= FTRACE_UPDATE_TRACE_FUNC
;
2415 if (!command
|| !ftrace_enabled
) {
2417 * If these are control ops, they still need their
2418 * per_cpu field freed. Since, function tracing is
2419 * not currently active, we can just free them
2420 * without synchronizing all CPUs.
2422 if (ops
->flags
& FTRACE_OPS_FL_CONTROL
)
2423 control_ops_free(ops
);
2428 * If the ops uses a trampoline, then it needs to be
2429 * tested first on update.
2431 ops
->flags
|= FTRACE_OPS_FL_REMOVING
;
2434 /* The trampoline logic checks the old hashes */
2435 ops
->old_hash
.filter_hash
= ops
->func_hash
->filter_hash
;
2436 ops
->old_hash
.notrace_hash
= ops
->func_hash
->notrace_hash
;
2438 ftrace_run_update_code(command
);
2441 * If there's no more ops registered with ftrace, run a
2442 * sanity check to make sure all rec flags are cleared.
2444 if (ftrace_ops_list
== &ftrace_list_end
) {
2445 struct ftrace_page
*pg
;
2446 struct dyn_ftrace
*rec
;
2448 do_for_each_ftrace_rec(pg
, rec
) {
2449 if (FTRACE_WARN_ON_ONCE(rec
->flags
))
2450 pr_warn(" %pS flags:%lx\n",
2451 (void *)rec
->ip
, rec
->flags
);
2452 } while_for_each_ftrace_rec();
2455 ops
->old_hash
.filter_hash
= NULL
;
2456 ops
->old_hash
.notrace_hash
= NULL
;
2459 ops
->flags
&= ~FTRACE_OPS_FL_REMOVING
;
2462 * Dynamic ops may be freed, we must make sure that all
2463 * callers are done before leaving this function.
2464 * The same goes for freeing the per_cpu data of the control
2467 * Again, normal synchronize_sched() is not good enough.
2468 * We need to do a hard force of sched synchronization.
2469 * This is because we use preempt_disable() to do RCU, but
2470 * the function tracers can be called where RCU is not watching
2471 * (like before user_exit()). We can not rely on the RCU
2472 * infrastructure to do the synchronization, thus we must do it
2475 if (ops
->flags
& (FTRACE_OPS_FL_DYNAMIC
| FTRACE_OPS_FL_CONTROL
)) {
2476 schedule_on_each_cpu(ftrace_sync
);
2478 if (ops
->flags
& FTRACE_OPS_FL_CONTROL
)
2479 control_ops_free(ops
);
static void ftrace_startup_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftrace_start_up is true if we want ftrace running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
}

static void ftrace_shutdown_sysctl(void)
{
	if (unlikely(ftrace_disabled))
		return;

	/* ftrace_start_up is true if ftrace is running */
	if (ftrace_start_up)
		ftrace_run_update_code(FTRACE_DISABLE_CALLS);
}
static cycle_t		ftrace_update_time;
unsigned long		ftrace_update_tot_cnt;

static inline int ops_traces_mod(struct ftrace_ops *ops)
{
	/*
	 * Filter_hash being empty will default to trace module.
	 * But notrace hash requires a test of individual module functions.
	 */
	return ftrace_hash_empty(ops->func_hash->filter_hash) &&
		ftrace_hash_empty(ops->func_hash->notrace_hash);
}
/*
 * Check if the current ops references the record.
 *
 * If the ops traces all functions, then it was already accounted for.
 * If the ops does not trace the current record function, skip it.
 * If the ops ignores the function via notrace filter, skip it.
 */
static inline bool
ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	/* If ops isn't enabled, ignore it */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return false;

	/* If ops traces all mods, we already accounted for it */
	if (ops_traces_mod(ops))
		return false;

	/* The function must be in the filter */
	if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
	    !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
		return false;

	/* If in notrace hash, we ignore it too */
	if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
		return false;

	return true;
}

static int referenced_filters(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
		if (ops_references_rec(ops, rec))
			cnt++;
	}

	return cnt;
}
static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *p;
	cycle_t start, stop;
	unsigned long update_cnt = 0;
	unsigned long ref = 0;
	bool test = false;
	int i;

	/*
	 * When adding a module, we need to check if tracers are
	 * currently enabled and if they are set to trace all functions.
	 * If they are, we need to enable the module functions as well
	 * as update the reference counts for those function records.
	 */
	if (mod) {
		struct ftrace_ops *ops;

		for (ops = ftrace_ops_list;
		     ops != &ftrace_list_end; ops = ops->next) {
			if (ops->flags & FTRACE_OPS_FL_ENABLED) {
				if (ops_traces_mod(ops))
					ref++;
				else
					test = true;
			}
		}
	}

	start = ftrace_now(raw_smp_processor_id());

	for (pg = new_pgs; pg; pg = pg->next) {

		for (i = 0; i < pg->index; i++) {
			int cnt = ref;

			/* If something went wrong, bail without enabling anything */
			if (unlikely(ftrace_disabled))
				return -1;

			p = &pg->records[i];
			if (test)
				cnt += referenced_filters(p);

			/*
			 * Do the initial record conversion from mcount jump
			 * to the NOP instructions.
			 */
			if (!ftrace_code_disable(mod, p))
				break;

			update_cnt++;

			/*
			 * If the tracing is enabled, go ahead and enable the record.
			 *
			 * The reason not to enable the record immediately is the
			 * inherent check of ftrace_make_nop/ftrace_make_call for
			 * correct previous instructions. Making first the NOP
			 * conversion puts the module to the correct state, thus
			 * passing the ftrace_make_call check.
			 */
			if (ftrace_start_up && cnt) {
				int failed = __ftrace_replace_code(p, 1);
				if (failed)
					ftrace_bug(failed, p->ip);
			}
		}
	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += update_cnt;

	return 0;
}
static int ftrace_allocate_records(struct ftrace_page *pg, int count)
{
	int order;
	int cnt;

	if (WARN_ON(!count))
		return -EINVAL;

	order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));

	/*
	 * We want to fill as much as possible. No more than a page
	 * may be empty.
	 */
	while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
		order--;

 again:
	pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!pg->records) {
		/* if we can't allocate this size, try something smaller */
		if (!order)
			return -ENOMEM;
		order >>= 1;
		goto again;
	}

	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
	pg->size = cnt;

	if (cnt > count)
		cnt = count;

	return cnt;
}
static struct ftrace_page *
ftrace_allocate_pages(unsigned long num_to_init)
{
	struct ftrace_page *start_pg;
	struct ftrace_page *pg;
	int order;
	int cnt;

	if (!num_to_init)
		return 0;

	start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
	if (!pg)
		return NULL;

	/*
	 * Try to allocate as much as possible in one contiguous
	 * location that fills in all of the space. We want to
	 * waste as little space as possible.
	 */
	for (;;) {
		cnt = ftrace_allocate_records(pg, num_to_init);
		if (cnt < 0)
			goto free_pages;

		num_to_init -= cnt;
		if (!num_to_init)
			break;

		pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
		if (!pg->next)
			goto free_pages;

		pg = pg->next;
	}

	return start_pg;

 free_pages:
	pg = start_pg;
	while (pg) {
		order = get_count_order(pg->size / ENTRIES_PER_PAGE);
		free_pages((unsigned long)pg->records, order);
		start_pg = pg->next;
		kfree(pg);
		pg = start_pg;
	}
	pr_info("ftrace: FAILED to allocate memory for functions\n");
	return NULL;
}
#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
	loff_t				pos;
	loff_t				func_pos;
	struct ftrace_page		*pg;
	struct dyn_ftrace		*func;
	struct ftrace_func_probe	*probe;
	struct trace_parser		parser;
	struct ftrace_hash		*hash;
	struct ftrace_ops		*ops;
	int				hidx;
	int				idx;
	unsigned			flags;
};
2747 t_hash_next(struct seq_file
*m
, loff_t
*pos
)
2749 struct ftrace_iterator
*iter
= m
->private;
2750 struct hlist_node
*hnd
= NULL
;
2751 struct hlist_head
*hhd
;
2757 hnd
= &iter
->probe
->node
;
2759 if (iter
->hidx
>= FTRACE_FUNC_HASHSIZE
)
2762 hhd
= &ftrace_func_hash
[iter
->hidx
];
2764 if (hlist_empty(hhd
)) {
2780 if (WARN_ON_ONCE(!hnd
))
2783 iter
->probe
= hlist_entry(hnd
, struct ftrace_func_probe
, node
);
2788 static void *t_hash_start(struct seq_file
*m
, loff_t
*pos
)
2790 struct ftrace_iterator
*iter
= m
->private;
2794 if (!(iter
->flags
& FTRACE_ITER_DO_HASH
))
2797 if (iter
->func_pos
> *pos
)
2801 for (l
= 0; l
<= (*pos
- iter
->func_pos
); ) {
2802 p
= t_hash_next(m
, &l
);
2809 /* Only set this if we have an item */
2810 iter
->flags
|= FTRACE_ITER_HASH
;
2816 t_hash_show(struct seq_file
*m
, struct ftrace_iterator
*iter
)
2818 struct ftrace_func_probe
*rec
;
2821 if (WARN_ON_ONCE(!rec
))
2824 if (rec
->ops
->print
)
2825 return rec
->ops
->print(m
, rec
->ip
, rec
->ops
, rec
->data
);
2827 seq_printf(m
, "%ps:%ps", (void *)rec
->ip
, (void *)rec
->ops
->func
);
2830 seq_printf(m
, ":%p", rec
->data
);
2837 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
2839 struct ftrace_iterator
*iter
= m
->private;
2840 struct ftrace_ops
*ops
= iter
->ops
;
2841 struct dyn_ftrace
*rec
= NULL
;
2843 if (unlikely(ftrace_disabled
))
2846 if (iter
->flags
& FTRACE_ITER_HASH
)
2847 return t_hash_next(m
, pos
);
2850 iter
->pos
= iter
->func_pos
= *pos
;
2852 if (iter
->flags
& FTRACE_ITER_PRINTALL
)
2853 return t_hash_start(m
, pos
);
2856 if (iter
->idx
>= iter
->pg
->index
) {
2857 if (iter
->pg
->next
) {
2858 iter
->pg
= iter
->pg
->next
;
2863 rec
= &iter
->pg
->records
[iter
->idx
++];
2864 if (((iter
->flags
& FTRACE_ITER_FILTER
) &&
2865 !(ftrace_lookup_ip(ops
->func_hash
->filter_hash
, rec
->ip
))) ||
2867 ((iter
->flags
& FTRACE_ITER_NOTRACE
) &&
2868 !ftrace_lookup_ip(ops
->func_hash
->notrace_hash
, rec
->ip
)) ||
2870 ((iter
->flags
& FTRACE_ITER_ENABLED
) &&
2871 !(rec
->flags
& FTRACE_FL_ENABLED
))) {
2879 return t_hash_start(m
, pos
);
2886 static void reset_iter_read(struct ftrace_iterator
*iter
)
2890 iter
->flags
&= ~(FTRACE_ITER_PRINTALL
| FTRACE_ITER_HASH
);
2893 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
2895 struct ftrace_iterator
*iter
= m
->private;
2896 struct ftrace_ops
*ops
= iter
->ops
;
2900 mutex_lock(&ftrace_lock
);
2902 if (unlikely(ftrace_disabled
))
2906 * If an lseek was done, then reset and start from beginning.
2908 if (*pos
< iter
->pos
)
2909 reset_iter_read(iter
);
2912 * For set_ftrace_filter reading, if we have the filter
2913 * off, we can short cut and just print out that all
2914 * functions are enabled.
2916 if ((iter
->flags
& FTRACE_ITER_FILTER
&&
2917 ftrace_hash_empty(ops
->func_hash
->filter_hash
)) ||
2918 (iter
->flags
& FTRACE_ITER_NOTRACE
&&
2919 ftrace_hash_empty(ops
->func_hash
->notrace_hash
))) {
2921 return t_hash_start(m
, pos
);
2922 iter
->flags
|= FTRACE_ITER_PRINTALL
;
2923 /* reset in case of seek/pread */
2924 iter
->flags
&= ~FTRACE_ITER_HASH
;
2928 if (iter
->flags
& FTRACE_ITER_HASH
)
2929 return t_hash_start(m
, pos
);
	/*
	 * Unfortunately, we need to restart at ftrace_pages_start
	 * every time we let go of the ftrace_lock mutex. This is because
	 * those pointers can change without the lock.
	 */
2936 iter
->pg
= ftrace_pages_start
;
2938 for (l
= 0; l
<= *pos
; ) {
2939 p
= t_next(m
, p
, &l
);
2945 return t_hash_start(m
, pos
);
2950 static void t_stop(struct seq_file
*m
, void *p
)
2952 mutex_unlock(&ftrace_lock
);
2955 static int t_show(struct seq_file
*m
, void *v
)
2957 struct ftrace_iterator
*iter
= m
->private;
2958 struct dyn_ftrace
*rec
;
2960 if (iter
->flags
& FTRACE_ITER_HASH
)
2961 return t_hash_show(m
, iter
);
2963 if (iter
->flags
& FTRACE_ITER_PRINTALL
) {
2964 if (iter
->flags
& FTRACE_ITER_NOTRACE
)
2965 seq_printf(m
, "#### no functions disabled ####\n");
2967 seq_printf(m
, "#### all functions enabled ####\n");
2976 seq_printf(m
, "%ps", (void *)rec
->ip
);
2977 if (iter
->flags
& FTRACE_ITER_ENABLED
) {
2978 seq_printf(m
, " (%ld)%s",
2979 ftrace_rec_count(rec
),
2980 rec
->flags
& FTRACE_FL_REGS
? " R" : " ");
2981 if (rec
->flags
& FTRACE_FL_TRAMP_EN
) {
2982 struct ftrace_ops
*ops
;
2984 ops
= ftrace_find_tramp_ops_any(rec
);
2986 seq_printf(m
, "\ttramp: %pS",
2987 (void *)ops
->trampoline
);
2989 seq_printf(m
, "\ttramp: ERROR!");
2993 seq_printf(m
, "\n");
2998 static const struct seq_operations show_ftrace_seq_ops
= {
3006 ftrace_avail_open(struct inode
*inode
, struct file
*file
)
3008 struct ftrace_iterator
*iter
;
3010 if (unlikely(ftrace_disabled
))
3013 iter
= __seq_open_private(file
, &show_ftrace_seq_ops
, sizeof(*iter
));
3015 iter
->pg
= ftrace_pages_start
;
3016 iter
->ops
= &global_ops
;
3019 return iter
? 0 : -ENOMEM
;
3023 ftrace_enabled_open(struct inode
*inode
, struct file
*file
)
3025 struct ftrace_iterator
*iter
;
3027 iter
= __seq_open_private(file
, &show_ftrace_seq_ops
, sizeof(*iter
));
3029 iter
->pg
= ftrace_pages_start
;
3030 iter
->flags
= FTRACE_ITER_ENABLED
;
3031 iter
->ops
= &global_ops
;
3034 return iter
? 0 : -ENOMEM
;
/**
 * ftrace_regex_open - initialize function tracer filter files
 * @ops: The ftrace_ops that hold the hash filters
 * @flag: The type of filter to process
 * @inode: The inode, usually passed in to your open routine
 * @file: The file, usually passed in to your open routine
 *
 * ftrace_regex_open() initializes the filter files for the
 * @ops. Depending on @flag it may process the filter hash or
 * the notrace hash of @ops. With this called from the open
 * routine, you can use ftrace_filter_write() for the write
 * routine if @flag has FTRACE_ITER_FILTER set, or
 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
 * tracing_lseek() should be used as the lseek routine, and
 * release must call ftrace_regex_release().
 */
3054 ftrace_regex_open(struct ftrace_ops
*ops
, int flag
,
3055 struct inode
*inode
, struct file
*file
)
3057 struct ftrace_iterator
*iter
;
3058 struct ftrace_hash
*hash
;
3061 ftrace_ops_init(ops
);
3063 if (unlikely(ftrace_disabled
))
3066 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
3070 if (trace_parser_get_init(&iter
->parser
, FTRACE_BUFF_MAX
)) {
3078 mutex_lock(&ops
->func_hash
->regex_lock
);
3080 if (flag
& FTRACE_ITER_NOTRACE
)
3081 hash
= ops
->func_hash
->notrace_hash
;
3083 hash
= ops
->func_hash
->filter_hash
;
3085 if (file
->f_mode
& FMODE_WRITE
) {
3086 const int size_bits
= FTRACE_HASH_DEFAULT_BITS
;
3088 if (file
->f_flags
& O_TRUNC
)
3089 iter
->hash
= alloc_ftrace_hash(size_bits
);
3091 iter
->hash
= alloc_and_copy_ftrace_hash(size_bits
, hash
);
3094 trace_parser_put(&iter
->parser
);
3101 if (file
->f_mode
& FMODE_READ
) {
3102 iter
->pg
= ftrace_pages_start
;
3104 ret
= seq_open(file
, &show_ftrace_seq_ops
);
3106 struct seq_file
*m
= file
->private_data
;
3110 free_ftrace_hash(iter
->hash
);
3111 trace_parser_put(&iter
->parser
);
3115 file
->private_data
= iter
;
3118 mutex_unlock(&ops
->func_hash
->regex_lock
);
3124 ftrace_filter_open(struct inode
*inode
, struct file
*file
)
3126 struct ftrace_ops
*ops
= inode
->i_private
;
3128 return ftrace_regex_open(ops
,
3129 FTRACE_ITER_FILTER
| FTRACE_ITER_DO_HASH
,
3134 ftrace_notrace_open(struct inode
*inode
, struct file
*file
)
3136 struct ftrace_ops
*ops
= inode
->i_private
;
3138 return ftrace_regex_open(ops
, FTRACE_ITER_NOTRACE
,
3142 static int ftrace_match(char *str
, char *regex
, int len
, int type
)
3149 if (strcmp(str
, regex
) == 0)
3152 case MATCH_FRONT_ONLY
:
3153 if (strncmp(str
, regex
, len
) == 0)
3156 case MATCH_MIDDLE_ONLY
:
3157 if (strstr(str
, regex
))
3160 case MATCH_END_ONLY
:
3162 if (slen
>= len
&& memcmp(str
+ slen
- len
, regex
, len
) == 0)
3171 enter_record(struct ftrace_hash
*hash
, struct dyn_ftrace
*rec
, int not)
3173 struct ftrace_func_entry
*entry
;
3176 entry
= ftrace_lookup_ip(hash
, rec
->ip
);
3178 /* Do nothing if it doesn't exist */
3182 free_hash_entry(hash
, entry
);
3184 /* Do nothing if it exists */
3188 ret
= add_hash_entry(hash
, rec
->ip
);
3194 ftrace_match_record(struct dyn_ftrace
*rec
, char *mod
,
3195 char *regex
, int len
, int type
)
3197 char str
[KSYM_SYMBOL_LEN
];
3200 kallsyms_lookup(rec
->ip
, NULL
, NULL
, &modname
, str
);
3203 /* module lookup requires matching the module */
3204 if (!modname
|| strcmp(modname
, mod
))
3207 /* blank search means to match all funcs in the mod */
3212 return ftrace_match(str
, regex
, len
, type
);
3216 match_records(struct ftrace_hash
*hash
, char *buff
,
3217 int len
, char *mod
, int not)
3219 unsigned search_len
= 0;
3220 struct ftrace_page
*pg
;
3221 struct dyn_ftrace
*rec
;
3222 int type
= MATCH_FULL
;
3223 char *search
= buff
;
3228 type
= filter_parse_regex(buff
, len
, &search
, ¬);
3229 search_len
= strlen(search
);
3232 mutex_lock(&ftrace_lock
);
3234 if (unlikely(ftrace_disabled
))
3237 do_for_each_ftrace_rec(pg
, rec
) {
3238 if (ftrace_match_record(rec
, mod
, search
, search_len
, type
)) {
3239 ret
= enter_record(hash
, rec
, not);
3246 } while_for_each_ftrace_rec();
3248 mutex_unlock(&ftrace_lock
);
3254 ftrace_match_records(struct ftrace_hash
*hash
, char *buff
, int len
)
3256 return match_records(hash
, buff
, len
, NULL
, 0);
3260 ftrace_match_module_records(struct ftrace_hash
*hash
, char *buff
, char *mod
)
3264 /* blank or '*' mean the same */
3265 if (strcmp(buff
, "*") == 0)
3268 /* handle the case of 'dont filter this module' */
3269 if (strcmp(buff
, "!") == 0 || strcmp(buff
, "!*") == 0) {
3274 return match_records(hash
, buff
, strlen(buff
), mod
, not);
/*
 * We register the module command as a template to show others how
 * to register a command as well.
 */
3283 ftrace_mod_callback(struct ftrace_hash
*hash
,
3284 char *func
, char *cmd
, char *param
, int enable
)
	/*
	 * cmd == 'mod' because we only registered this func
	 * for the 'mod' ftrace_func_command.
	 * But if you register one func with multiple commands,
	 * you can tell which command was used by the cmd
	 * parameter.
	 */

	/* we must have a module name */
3301 mod
= strsep(¶m
, ":");
3305 ret
= ftrace_match_module_records(hash
, func
, mod
);
static struct ftrace_func_command ftrace_mod_cmd = {
	.name			= "mod",
	.func			= ftrace_mod_callback,
};

static int __init ftrace_mod_cmd_init(void)
{
	return register_ftrace_command(&ftrace_mod_cmd);
}
core_initcall(ftrace_mod_cmd_init);
3325 static void function_trace_probe_call(unsigned long ip
, unsigned long parent_ip
,
3326 struct ftrace_ops
*op
, struct pt_regs
*pt_regs
)
3328 struct ftrace_func_probe
*entry
;
3329 struct hlist_head
*hhd
;
3332 key
= hash_long(ip
, FTRACE_HASH_BITS
);
3334 hhd
= &ftrace_func_hash
[key
];
3336 if (hlist_empty(hhd
))
	/*
	 * Disable preemption for these calls to prevent a RCU grace
	 * period. This syncs the hash iteration and freeing of items
	 * on the hash. rcu_read_lock is too dangerous here.
	 */
3344 preempt_disable_notrace();
3345 hlist_for_each_entry_rcu_notrace(entry
, hhd
, node
) {
3346 if (entry
->ip
== ip
)
3347 entry
->ops
->func(ip
, parent_ip
, &entry
->data
);
3349 preempt_enable_notrace();
3352 static struct ftrace_ops trace_probe_ops __read_mostly
=
3354 .func
= function_trace_probe_call
,
3355 .flags
= FTRACE_OPS_FL_INITIALIZED
,
3356 INIT_OPS_HASH(trace_probe_ops
)
3359 static int ftrace_probe_registered
;
3361 static void __enable_ftrace_function_probe(struct ftrace_hash
*old_hash
)
3366 if (ftrace_probe_registered
) {
3367 /* still need to update the function call sites */
3369 ftrace_run_modify_code(&trace_probe_ops
, FTRACE_UPDATE_CALLS
,
3374 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
3375 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
3379 /* Nothing registered? */
3380 if (i
== FTRACE_FUNC_HASHSIZE
)
3383 ret
= ftrace_startup(&trace_probe_ops
, 0);
3385 ftrace_probe_registered
= 1;
3388 static void __disable_ftrace_function_probe(void)
3392 if (!ftrace_probe_registered
)
3395 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
3396 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
3401 /* no more funcs left */
3402 ftrace_shutdown(&trace_probe_ops
, 0);
3404 ftrace_probe_registered
= 0;
3408 static void ftrace_free_entry(struct ftrace_func_probe
*entry
)
3410 if (entry
->ops
->free
)
3411 entry
->ops
->free(entry
->ops
, entry
->ip
, &entry
->data
);
3416 register_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
3419 struct ftrace_func_probe
*entry
;
3420 struct ftrace_hash
**orig_hash
= &trace_probe_ops
.func_hash
->filter_hash
;
3421 struct ftrace_hash
*old_hash
= *orig_hash
;
3422 struct ftrace_hash
*hash
;
3423 struct ftrace_page
*pg
;
3424 struct dyn_ftrace
*rec
;
3431 type
= filter_parse_regex(glob
, strlen(glob
), &search
, ¬);
3432 len
= strlen(search
);
3434 /* we do not support '!' for function probes */
3438 mutex_lock(&trace_probe_ops
.func_hash
->regex_lock
);
3440 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, old_hash
);
3446 if (unlikely(ftrace_disabled
)) {
3451 mutex_lock(&ftrace_lock
);
3453 do_for_each_ftrace_rec(pg
, rec
) {
3455 if (!ftrace_match_record(rec
, NULL
, search
, len
, type
))
3458 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
3460 /* If we did not process any, then return error */
		/*
		 * The caller might want to do something special
		 * for each function we find. We call the callback
		 * to give the caller an opportunity to do so.
		 */
3476 if (ops
->init(ops
, rec
->ip
, &entry
->data
) < 0) {
3477 /* caller does not like this func */
3483 ret
= enter_record(hash
, rec
, 0);
3491 entry
->ip
= rec
->ip
;
3493 key
= hash_long(entry
->ip
, FTRACE_HASH_BITS
);
3494 hlist_add_head_rcu(&entry
->node
, &ftrace_func_hash
[key
]);
3496 } while_for_each_ftrace_rec();
3498 ret
= ftrace_hash_move(&trace_probe_ops
, 1, orig_hash
, hash
);
3500 __enable_ftrace_function_probe(old_hash
);
3503 free_ftrace_hash_rcu(old_hash
);
3508 mutex_unlock(&ftrace_lock
);
3510 mutex_unlock(&trace_probe_ops
.func_hash
->regex_lock
);
3511 free_ftrace_hash(hash
);
3517 PROBE_TEST_FUNC
= 1,
3522 __unregister_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
3523 void *data
, int flags
)
3525 struct ftrace_func_entry
*rec_entry
;
3526 struct ftrace_func_probe
*entry
;
3527 struct ftrace_func_probe
*p
;
3528 struct ftrace_hash
**orig_hash
= &trace_probe_ops
.func_hash
->filter_hash
;
3529 struct ftrace_hash
*old_hash
= *orig_hash
;
3530 struct list_head free_list
;
3531 struct ftrace_hash
*hash
;
3532 struct hlist_node
*tmp
;
3533 char str
[KSYM_SYMBOL_LEN
];
3534 int type
= MATCH_FULL
;
3539 if (glob
&& (strcmp(glob
, "*") == 0 || !strlen(glob
)))
3544 type
= filter_parse_regex(glob
, strlen(glob
), &search
, ¬);
3545 len
= strlen(search
);
3547 /* we do not support '!' for function probes */
3552 mutex_lock(&trace_probe_ops
.func_hash
->regex_lock
);
3554 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, *orig_hash
);
3556 /* Hmm, should report this somehow */
3559 INIT_LIST_HEAD(&free_list
);
3561 for (i
= 0; i
< FTRACE_FUNC_HASHSIZE
; i
++) {
3562 struct hlist_head
*hhd
= &ftrace_func_hash
[i
];
3564 hlist_for_each_entry_safe(entry
, tmp
, hhd
, node
) {
3566 /* break up if statements for readability */
3567 if ((flags
& PROBE_TEST_FUNC
) && entry
->ops
!= ops
)
3570 if ((flags
& PROBE_TEST_DATA
) && entry
->data
!= data
)
3573 /* do this last, since it is the most expensive */
3575 kallsyms_lookup(entry
->ip
, NULL
, NULL
,
3577 if (!ftrace_match(str
, glob
, len
, type
))
3581 rec_entry
= ftrace_lookup_ip(hash
, entry
->ip
);
3582 /* It is possible more than one entry had this ip */
3584 free_hash_entry(hash
, rec_entry
);
3586 hlist_del_rcu(&entry
->node
);
3587 list_add(&entry
->free_list
, &free_list
);
3590 mutex_lock(&ftrace_lock
);
3591 __disable_ftrace_function_probe();
	/*
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
3596 ret
= ftrace_hash_move(&trace_probe_ops
, 1, orig_hash
, hash
);
3597 synchronize_sched();
3599 free_ftrace_hash_rcu(old_hash
);
3601 list_for_each_entry_safe(entry
, p
, &free_list
, free_list
) {
3602 list_del(&entry
->free_list
);
3603 ftrace_free_entry(entry
);
3605 mutex_unlock(&ftrace_lock
);
3608 mutex_unlock(&trace_probe_ops
.func_hash
->regex_lock
);
3609 free_ftrace_hash(hash
);
3613 unregister_ftrace_function_probe(char *glob
, struct ftrace_probe_ops
*ops
,
3616 __unregister_ftrace_function_probe(glob
, ops
, data
,
3617 PROBE_TEST_FUNC
| PROBE_TEST_DATA
);
3621 unregister_ftrace_function_probe_func(char *glob
, struct ftrace_probe_ops
*ops
)
3623 __unregister_ftrace_function_probe(glob
, ops
, NULL
, PROBE_TEST_FUNC
);
3626 void unregister_ftrace_function_probe_all(char *glob
)
3628 __unregister_ftrace_function_probe(glob
, NULL
, NULL
, 0);
3631 static LIST_HEAD(ftrace_commands
);
3632 static DEFINE_MUTEX(ftrace_cmd_mutex
);
/*
 * Currently we only register ftrace commands from __init, so mark this
 * __init too.
 */
__init int register_ftrace_command(struct ftrace_func_command *cmd)
{
3640 struct ftrace_func_command
*p
;
3643 mutex_lock(&ftrace_cmd_mutex
);
3644 list_for_each_entry(p
, &ftrace_commands
, list
) {
3645 if (strcmp(cmd
->name
, p
->name
) == 0) {
3650 list_add(&cmd
->list
, &ftrace_commands
);
3652 mutex_unlock(&ftrace_cmd_mutex
);
/*
 * Currently we only unregister ftrace commands from __init, so mark
 * this __init too.
 */
__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
{
3663 struct ftrace_func_command
*p
, *n
;
3666 mutex_lock(&ftrace_cmd_mutex
);
3667 list_for_each_entry_safe(p
, n
, &ftrace_commands
, list
) {
3668 if (strcmp(cmd
->name
, p
->name
) == 0) {
3670 list_del_init(&p
->list
);
3675 mutex_unlock(&ftrace_cmd_mutex
);
3680 static int ftrace_process_regex(struct ftrace_hash
*hash
,
3681 char *buff
, int len
, int enable
)
3683 char *func
, *command
, *next
= buff
;
3684 struct ftrace_func_command
*p
;
3687 func
= strsep(&next
, ":");
3690 ret
= ftrace_match_records(hash
, func
, len
);
3700 command
= strsep(&next
, ":");
3702 mutex_lock(&ftrace_cmd_mutex
);
3703 list_for_each_entry(p
, &ftrace_commands
, list
) {
3704 if (strcmp(p
->name
, command
) == 0) {
3705 ret
= p
->func(hash
, func
, command
, next
, enable
);
3710 mutex_unlock(&ftrace_cmd_mutex
);
3716 ftrace_regex_write(struct file
*file
, const char __user
*ubuf
,
3717 size_t cnt
, loff_t
*ppos
, int enable
)
3719 struct ftrace_iterator
*iter
;
3720 struct trace_parser
*parser
;
3726 if (file
->f_mode
& FMODE_READ
) {
3727 struct seq_file
*m
= file
->private_data
;
3730 iter
= file
->private_data
;
3732 if (unlikely(ftrace_disabled
))
3735 /* iter->hash is a local copy, so we don't need regex_lock */
3737 parser
= &iter
->parser
;
3738 read
= trace_get_user(parser
, ubuf
, cnt
, ppos
);
3740 if (read
>= 0 && trace_parser_loaded(parser
) &&
3741 !trace_parser_cont(parser
)) {
3742 ret
= ftrace_process_regex(iter
->hash
, parser
->buffer
,
3743 parser
->idx
, enable
);
3744 trace_parser_clear(parser
);
3755 ftrace_filter_write(struct file
*file
, const char __user
*ubuf
,
3756 size_t cnt
, loff_t
*ppos
)
3758 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 1);
3762 ftrace_notrace_write(struct file
*file
, const char __user
*ubuf
,
3763 size_t cnt
, loff_t
*ppos
)
3765 return ftrace_regex_write(file
, ubuf
, cnt
, ppos
, 0);
3769 ftrace_match_addr(struct ftrace_hash
*hash
, unsigned long ip
, int remove
)
3771 struct ftrace_func_entry
*entry
;
3773 if (!ftrace_location(ip
))
3777 entry
= ftrace_lookup_ip(hash
, ip
);
3780 free_hash_entry(hash
, entry
);
3784 return add_hash_entry(hash
, ip
);
3787 static void ftrace_ops_update_code(struct ftrace_ops
*ops
,
3788 struct ftrace_hash
*old_hash
)
3790 if (ops
->flags
& FTRACE_OPS_FL_ENABLED
&& ftrace_enabled
)
3791 ftrace_run_modify_code(ops
, FTRACE_UPDATE_CALLS
, old_hash
);
3795 ftrace_set_hash(struct ftrace_ops
*ops
, unsigned char *buf
, int len
,
3796 unsigned long ip
, int remove
, int reset
, int enable
)
3798 struct ftrace_hash
**orig_hash
;
3799 struct ftrace_hash
*old_hash
;
3800 struct ftrace_hash
*hash
;
3803 if (unlikely(ftrace_disabled
))
3806 mutex_lock(&ops
->func_hash
->regex_lock
);
3809 orig_hash
= &ops
->func_hash
->filter_hash
;
3811 orig_hash
= &ops
->func_hash
->notrace_hash
;
3814 hash
= alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
);
3816 hash
= alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS
, *orig_hash
);
3820 goto out_regex_unlock
;
3823 if (buf
&& !ftrace_match_records(hash
, buf
, len
)) {
3825 goto out_regex_unlock
;
3828 ret
= ftrace_match_addr(hash
, ip
, remove
);
3830 goto out_regex_unlock
;
3833 mutex_lock(&ftrace_lock
);
3834 old_hash
= *orig_hash
;
3835 ret
= ftrace_hash_move(ops
, enable
, orig_hash
, hash
);
3837 ftrace_ops_update_code(ops
, old_hash
);
3838 free_ftrace_hash_rcu(old_hash
);
3840 mutex_unlock(&ftrace_lock
);
3843 mutex_unlock(&ops
->func_hash
->regex_lock
);
3845 free_ftrace_hash(hash
);
3850 ftrace_set_addr(struct ftrace_ops
*ops
, unsigned long ip
, int remove
,
3851 int reset
, int enable
)
3853 return ftrace_set_hash(ops
, 0, 0, ip
, remove
, reset
, enable
);
/**
 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
 * @ops - the ops to set the filter with
 * @ip - the address to add to or remove from the filter.
 * @remove - non zero to remove the ip from the filter
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @ip is NULL, it fails to update the filter.
 */
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
3875 ftrace_set_regex(struct ftrace_ops
*ops
, unsigned char *buf
, int len
,
3876 int reset
, int enable
)
3878 return ftrace_set_hash(ops
, buf
, len
, 0, 0, reset
, enable
);
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @ops - the ops to set the filter with
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter);
/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @ops - the ops to set the notrace filter with
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
			int len, int reset)
{
	ftrace_ops_init(ops);
	return ftrace_set_regex(ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_notrace);
/**
 * ftrace_set_global_filter - set a function to filter on with global tracers
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
/**
 * ftrace_set_global_notrace - set a function to not trace with global tracers
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(&global_ops, buf, len, reset, 0);
}
EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
/*
 * command line interface to allow users to set filters on boot up.
 */
#define FTRACE_FILTER_SIZE		COMMAND_LINE_SIZE
static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;

/* Used by function selftest to not test if filter is set */
bool ftrace_filter_param __initdata;

static int __init set_ftrace_notrace(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_notrace=", set_ftrace_notrace);

static int __init set_ftrace_filter(char *str)
{
	ftrace_filter_param = true;
	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
	return 1;
}
__setup("ftrace_filter=", set_ftrace_filter);
3974 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3975 static char ftrace_graph_buf
[FTRACE_FILTER_SIZE
] __initdata
;
3976 static char ftrace_graph_notrace_buf
[FTRACE_FILTER_SIZE
] __initdata
;
3977 static int ftrace_set_func(unsigned long *array
, int *idx
, int size
, char *buffer
);
3979 static unsigned long save_global_trampoline
;
3980 static unsigned long save_global_flags
;
3982 static int __init
set_graph_function(char *str
)
3984 strlcpy(ftrace_graph_buf
, str
, FTRACE_FILTER_SIZE
);
3987 __setup("ftrace_graph_filter=", set_graph_function
);
3989 static int __init
set_graph_notrace_function(char *str
)
3991 strlcpy(ftrace_graph_notrace_buf
, str
, FTRACE_FILTER_SIZE
);
3994 __setup("ftrace_graph_notrace=", set_graph_notrace_function
);
3996 static void __init
set_ftrace_early_graph(char *buf
, int enable
)
4000 unsigned long *table
= ftrace_graph_funcs
;
4001 int *count
= &ftrace_graph_count
;
4004 table
= ftrace_graph_notrace_funcs
;
4005 count
= &ftrace_graph_notrace_count
;
4009 func
= strsep(&buf
, ",");
4010 /* we allow only one expression at a time */
4011 ret
= ftrace_set_func(table
, count
, FTRACE_GRAPH_MAX_FUNCS
, func
);
4013 printk(KERN_DEBUG
"ftrace: function %s not "
4014 "traceable\n", func
);
4017 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4020 ftrace_set_early_filter(struct ftrace_ops
*ops
, char *buf
, int enable
)
4024 ftrace_ops_init(ops
);
4027 func
= strsep(&buf
, ",");
4028 ftrace_set_regex(ops
, func
, strlen(func
), 0, enable
);
4032 static void __init
set_ftrace_early_filters(void)
4034 if (ftrace_filter_buf
[0])
4035 ftrace_set_early_filter(&global_ops
, ftrace_filter_buf
, 1);
4036 if (ftrace_notrace_buf
[0])
4037 ftrace_set_early_filter(&global_ops
, ftrace_notrace_buf
, 0);
4038 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4039 if (ftrace_graph_buf
[0])
4040 set_ftrace_early_graph(ftrace_graph_buf
, 1);
4041 if (ftrace_graph_notrace_buf
[0])
4042 set_ftrace_early_graph(ftrace_graph_notrace_buf
, 0);
4043 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4046 int ftrace_regex_release(struct inode
*inode
, struct file
*file
)
4048 struct seq_file
*m
= (struct seq_file
*)file
->private_data
;
4049 struct ftrace_iterator
*iter
;
4050 struct ftrace_hash
**orig_hash
;
4051 struct ftrace_hash
*old_hash
;
4052 struct trace_parser
*parser
;
4056 if (file
->f_mode
& FMODE_READ
) {
4058 seq_release(inode
, file
);
4060 iter
= file
->private_data
;
4062 parser
= &iter
->parser
;
4063 if (trace_parser_loaded(parser
)) {
4064 parser
->buffer
[parser
->idx
] = 0;
4065 ftrace_match_records(iter
->hash
, parser
->buffer
, parser
->idx
);
4068 trace_parser_put(parser
);
4070 mutex_lock(&iter
->ops
->func_hash
->regex_lock
);
4072 if (file
->f_mode
& FMODE_WRITE
) {
4073 filter_hash
= !!(iter
->flags
& FTRACE_ITER_FILTER
);
4076 orig_hash
= &iter
->ops
->func_hash
->filter_hash
;
4078 orig_hash
= &iter
->ops
->func_hash
->notrace_hash
;
4080 mutex_lock(&ftrace_lock
);
4081 old_hash
= *orig_hash
;
4082 ret
= ftrace_hash_move(iter
->ops
, filter_hash
,
4083 orig_hash
, iter
->hash
);
4085 ftrace_ops_update_code(iter
->ops
, old_hash
);
4086 free_ftrace_hash_rcu(old_hash
);
4088 mutex_unlock(&ftrace_lock
);
4091 mutex_unlock(&iter
->ops
->func_hash
->regex_lock
);
4092 free_ftrace_hash(iter
->hash
);
4098 static const struct file_operations ftrace_avail_fops
= {
4099 .open
= ftrace_avail_open
,
4101 .llseek
= seq_lseek
,
4102 .release
= seq_release_private
,
4105 static const struct file_operations ftrace_enabled_fops
= {
4106 .open
= ftrace_enabled_open
,
4108 .llseek
= seq_lseek
,
4109 .release
= seq_release_private
,
4112 static const struct file_operations ftrace_filter_fops
= {
4113 .open
= ftrace_filter_open
,
4115 .write
= ftrace_filter_write
,
4116 .llseek
= tracing_lseek
,
4117 .release
= ftrace_regex_release
,
4120 static const struct file_operations ftrace_notrace_fops
= {
4121 .open
= ftrace_notrace_open
,
4123 .write
= ftrace_notrace_write
,
4124 .llseek
= tracing_lseek
,
4125 .release
= ftrace_regex_release
,
4128 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4130 static DEFINE_MUTEX(graph_lock
);
4132 int ftrace_graph_count
;
4133 int ftrace_graph_notrace_count
;
4134 unsigned long ftrace_graph_funcs
[FTRACE_GRAPH_MAX_FUNCS
] __read_mostly
;
4135 unsigned long ftrace_graph_notrace_funcs
[FTRACE_GRAPH_MAX_FUNCS
] __read_mostly
;
4137 struct ftrace_graph_data
{
4138 unsigned long *table
;
4141 const struct seq_operations
*seq_ops
;
4145 __g_next(struct seq_file
*m
, loff_t
*pos
)
4147 struct ftrace_graph_data
*fgd
= m
->private;
4149 if (*pos
>= *fgd
->count
)
4151 return &fgd
->table
[*pos
];
4155 g_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4158 return __g_next(m
, pos
);
4161 static void *g_start(struct seq_file
*m
, loff_t
*pos
)
4163 struct ftrace_graph_data
*fgd
= m
->private;
4165 mutex_lock(&graph_lock
);
4167 /* Nothing, tell g_show to print all functions are enabled */
4168 if (!*fgd
->count
&& !*pos
)
4171 return __g_next(m
, pos
);
4174 static void g_stop(struct seq_file
*m
, void *p
)
4176 mutex_unlock(&graph_lock
);
4179 static int g_show(struct seq_file
*m
, void *v
)
4181 unsigned long *ptr
= v
;
4186 if (ptr
== (unsigned long *)1) {
4187 struct ftrace_graph_data
*fgd
= m
->private;
4189 if (fgd
->table
== ftrace_graph_funcs
)
4190 seq_printf(m
, "#### all functions enabled ####\n");
4192 seq_printf(m
, "#### no functions disabled ####\n");
4196 seq_printf(m
, "%ps\n", (void *)*ptr
);
4201 static const struct seq_operations ftrace_graph_seq_ops
= {
4209 __ftrace_graph_open(struct inode
*inode
, struct file
*file
,
4210 struct ftrace_graph_data
*fgd
)
4214 mutex_lock(&graph_lock
);
4215 if ((file
->f_mode
& FMODE_WRITE
) &&
4216 (file
->f_flags
& O_TRUNC
)) {
4218 memset(fgd
->table
, 0, fgd
->size
* sizeof(*fgd
->table
));
4220 mutex_unlock(&graph_lock
);
4222 if (file
->f_mode
& FMODE_READ
) {
4223 ret
= seq_open(file
, fgd
->seq_ops
);
4225 struct seq_file
*m
= file
->private_data
;
4229 file
->private_data
= fgd
;
4235 ftrace_graph_open(struct inode
*inode
, struct file
*file
)
4237 struct ftrace_graph_data
*fgd
;
4239 if (unlikely(ftrace_disabled
))
4242 fgd
= kmalloc(sizeof(*fgd
), GFP_KERNEL
);
4246 fgd
->table
= ftrace_graph_funcs
;
4247 fgd
->size
= FTRACE_GRAPH_MAX_FUNCS
;
4248 fgd
->count
= &ftrace_graph_count
;
4249 fgd
->seq_ops
= &ftrace_graph_seq_ops
;
4251 return __ftrace_graph_open(inode
, file
, fgd
);
4255 ftrace_graph_notrace_open(struct inode
*inode
, struct file
*file
)
4257 struct ftrace_graph_data
*fgd
;
4259 if (unlikely(ftrace_disabled
))
4262 fgd
= kmalloc(sizeof(*fgd
), GFP_KERNEL
);
4266 fgd
->table
= ftrace_graph_notrace_funcs
;
4267 fgd
->size
= FTRACE_GRAPH_MAX_FUNCS
;
4268 fgd
->count
= &ftrace_graph_notrace_count
;
4269 fgd
->seq_ops
= &ftrace_graph_seq_ops
;
4271 return __ftrace_graph_open(inode
, file
, fgd
);
4275 ftrace_graph_release(struct inode
*inode
, struct file
*file
)
4277 if (file
->f_mode
& FMODE_READ
) {
4278 struct seq_file
*m
= file
->private_data
;
4281 seq_release(inode
, file
);
4283 kfree(file
->private_data
);
4290 ftrace_set_func(unsigned long *array
, int *idx
, int size
, char *buffer
)
4292 struct dyn_ftrace
*rec
;
4293 struct ftrace_page
*pg
;
4302 type
= filter_parse_regex(buffer
, strlen(buffer
), &search
, ¬);
4303 if (!not && *idx
>= size
)
4306 search_len
= strlen(search
);
4308 mutex_lock(&ftrace_lock
);
4310 if (unlikely(ftrace_disabled
)) {
4311 mutex_unlock(&ftrace_lock
);
4315 do_for_each_ftrace_rec(pg
, rec
) {
4317 if (ftrace_match_record(rec
, NULL
, search
, search_len
, type
)) {
4318 /* if it is in the array */
4320 for (i
= 0; i
< *idx
; i
++) {
4321 if (array
[i
] == rec
->ip
) {
4330 array
[(*idx
)++] = rec
->ip
;
4336 array
[i
] = array
[--(*idx
)];
4342 } while_for_each_ftrace_rec();
4344 mutex_unlock(&ftrace_lock
);
4353 ftrace_graph_write(struct file
*file
, const char __user
*ubuf
,
4354 size_t cnt
, loff_t
*ppos
)
4356 struct trace_parser parser
;
4357 ssize_t read
, ret
= 0;
4358 struct ftrace_graph_data
*fgd
= file
->private_data
;
4363 if (trace_parser_get_init(&parser
, FTRACE_BUFF_MAX
))
4366 read
= trace_get_user(&parser
, ubuf
, cnt
, ppos
);
4368 if (read
>= 0 && trace_parser_loaded((&parser
))) {
4369 parser
.buffer
[parser
.idx
] = 0;
4371 mutex_lock(&graph_lock
);
4373 /* we allow only one expression at a time */
4374 ret
= ftrace_set_func(fgd
->table
, fgd
->count
, fgd
->size
,
4377 mutex_unlock(&graph_lock
);
4383 trace_parser_put(&parser
);
static const struct file_operations ftrace_graph_fops = {
	.open		= ftrace_graph_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};

static const struct file_operations ftrace_graph_notrace_fops = {
	.open		= ftrace_graph_notrace_open,
	.read		= seq_read,
	.write		= ftrace_graph_write,
	.llseek		= tracing_lseek,
	.release	= ftrace_graph_release,
};
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
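/*
 * Example (usage note, not from the original file): the two files backed by
 * these fops live in the tracing directory and accept the same function
 * patterns as set_ftrace_filter, e.g. from a shell:
 *
 *	echo do_page_fault > /sys/kernel/debug/tracing/set_graph_function
 *	echo '*spin*' > /sys/kernel/debug/tracing/set_graph_notrace
 *
 * The first limits the function graph tracer to do_page_fault and its
 * children; the second keeps the matching spinlock functions out of the
 * graph output.
 */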
4405 void ftrace_create_filter_files(struct ftrace_ops
*ops
,
4406 struct dentry
*parent
)
4409 trace_create_file("set_ftrace_filter", 0644, parent
,
4410 ops
, &ftrace_filter_fops
);
4412 trace_create_file("set_ftrace_notrace", 0644, parent
,
4413 ops
, &ftrace_notrace_fops
);
/*
 * The name "destroy_filter_files" is really a misnomer. Although
 * in the future it may actually delete the files, this is really
 * intended to make sure the ops passed in are disabled and that
 * when this function returns, the caller is free to free the ops.
 *
 * The "destroy" name is only to match the "create" name that this
 * should be paired with.
 */
4426 void ftrace_destroy_filter_files(struct ftrace_ops
*ops
)
4428 mutex_lock(&ftrace_lock
);
4429 if (ops
->flags
& FTRACE_OPS_FL_ENABLED
)
4430 ftrace_shutdown(ops
, 0);
4431 ops
->flags
|= FTRACE_OPS_FL_DELETED
;
4432 mutex_unlock(&ftrace_lock
);
4435 static __init
int ftrace_init_dyn_debugfs(struct dentry
*d_tracer
)
4438 trace_create_file("available_filter_functions", 0444,
4439 d_tracer
, NULL
, &ftrace_avail_fops
);
4441 trace_create_file("enabled_functions", 0444,
4442 d_tracer
, NULL
, &ftrace_enabled_fops
);
4444 ftrace_create_filter_files(&global_ops
, d_tracer
);
4446 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4447 trace_create_file("set_graph_function", 0444, d_tracer
,
4449 &ftrace_graph_fops
);
4450 trace_create_file("set_graph_notrace", 0444, d_tracer
,
4452 &ftrace_graph_notrace_fops
);
4453 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4458 static int ftrace_cmp_ips(const void *a
, const void *b
)
4460 const unsigned long *ipa
= a
;
4461 const unsigned long *ipb
= b
;
4470 static void ftrace_swap_ips(void *a
, void *b
, int size
)
4472 unsigned long *ipa
= a
;
4473 unsigned long *ipb
= b
;
4481 static int ftrace_process_locs(struct module
*mod
,
4482 unsigned long *start
,
4485 struct ftrace_page
*start_pg
;
4486 struct ftrace_page
*pg
;
4487 struct dyn_ftrace
*rec
;
4488 unsigned long count
;
4491 unsigned long flags
= 0; /* Shut up gcc */
4494 count
= end
- start
;
4499 sort(start
, count
, sizeof(*start
),
4500 ftrace_cmp_ips
, ftrace_swap_ips
);
4502 start_pg
= ftrace_allocate_pages(count
);
4506 mutex_lock(&ftrace_lock
);
	/*
	 * Core and each module needs their own pages, as
	 * modules will free them when they are removed.
	 * Force a new page to be allocated for modules.
	 */
4514 WARN_ON(ftrace_pages
|| ftrace_pages_start
);
4515 /* First initialization */
4516 ftrace_pages
= ftrace_pages_start
= start_pg
;
4521 if (WARN_ON(ftrace_pages
->next
)) {
4522 /* Hmm, we have free pages? */
4523 while (ftrace_pages
->next
)
4524 ftrace_pages
= ftrace_pages
->next
;
4527 ftrace_pages
->next
= start_pg
;
4533 addr
= ftrace_call_adjust(*p
++);
		/*
		 * Some architecture linkers will pad between
		 * the different mcount_loc sections of different
		 * object files to satisfy alignments.
		 * Skip any NULL pointers.
		 */
4543 if (pg
->index
== pg
->size
) {
4544 /* We should have allocated enough */
4545 if (WARN_ON(!pg
->next
))
4550 rec
= &pg
->records
[pg
->index
++];
4554 /* We should have used all pages */
4557 /* Assign the last page to ftrace_pages */
	/*
	 * We only need to disable interrupts on start up
	 * because we are modifying code that an interrupt
	 * may execute, and the modification is not atomic.
	 * But for modules, nothing runs the code we modify
	 * until we are finished with it, and there's no
	 * reason to cause large interrupt latencies while we do it.
	 */
4569 local_irq_save(flags
);
4570 ftrace_update_code(mod
, start_pg
);
4572 local_irq_restore(flags
);
4575 mutex_unlock(&ftrace_lock
);
4580 #ifdef CONFIG_MODULES
4582 #define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4584 void ftrace_release_mod(struct module
*mod
)
4586 struct dyn_ftrace
*rec
;
4587 struct ftrace_page
**last_pg
;
4588 struct ftrace_page
*pg
;
4591 mutex_lock(&ftrace_lock
);
4593 if (ftrace_disabled
)
4597 * Each module has its own ftrace_pages, remove
4598 * them from the list.
4600 last_pg
= &ftrace_pages_start
;
4601 for (pg
= ftrace_pages_start
; pg
; pg
= *last_pg
) {
4602 rec
= &pg
->records
[0];
4603 if (within_module_core(rec
->ip
, mod
)) {
4605 * As core pages are first, the first
4606 * page should never be a module page.
4608 if (WARN_ON(pg
== ftrace_pages_start
))
4611 /* Check if we are deleting the last page */
4612 if (pg
== ftrace_pages
)
4613 ftrace_pages
= next_to_ftrace_page(last_pg
);
4615 *last_pg
= pg
->next
;
4616 order
= get_count_order(pg
->size
/ ENTRIES_PER_PAGE
);
4617 free_pages((unsigned long)pg
->records
, order
);
4620 last_pg
= &pg
->next
;
4623 mutex_unlock(&ftrace_lock
);
4626 static void ftrace_init_module(struct module
*mod
,
4627 unsigned long *start
, unsigned long *end
)
4629 if (ftrace_disabled
|| start
== end
)
4631 ftrace_process_locs(mod
, start
, end
);
4634 void ftrace_module_init(struct module
*mod
)
4636 ftrace_init_module(mod
, mod
->ftrace_callsites
,
4637 mod
->ftrace_callsites
+
4638 mod
->num_ftrace_callsites
);
4641 static int ftrace_module_notify_exit(struct notifier_block
*self
,
4642 unsigned long val
, void *data
)
4644 struct module
*mod
= data
;
4646 if (val
== MODULE_STATE_GOING
)
4647 ftrace_release_mod(mod
);
4652 static int ftrace_module_notify_exit(struct notifier_block
*self
,
4653 unsigned long val
, void *data
)
4657 #endif /* CONFIG_MODULES */
4659 struct notifier_block ftrace_module_exit_nb
= {
4660 .notifier_call
= ftrace_module_notify_exit
,
4661 .priority
= INT_MIN
, /* Run after anything that can remove kprobes */
4664 void __init
ftrace_init(void)
4666 extern unsigned long __start_mcount_loc
[];
4667 extern unsigned long __stop_mcount_loc
[];
4668 unsigned long count
, flags
;
4671 local_irq_save(flags
);
4672 ret
= ftrace_dyn_arch_init();
4673 local_irq_restore(flags
);
4677 count
= __stop_mcount_loc
- __start_mcount_loc
;
4679 pr_info("ftrace: No functions to be traced?\n");
4683 pr_info("ftrace: allocating %ld entries in %ld pages\n",
4684 count
, count
/ ENTRIES_PER_PAGE
+ 1);
4686 last_ftrace_enabled
= ftrace_enabled
= 1;
4688 ret
= ftrace_process_locs(NULL
,
4692 ret
= register_module_notifier(&ftrace_module_exit_nb
);
4694 pr_warning("Failed to register trace ftrace module exit notifier\n");
4696 set_ftrace_early_filters();
4700 ftrace_disabled
= 1;
4703 /* Do nothing if arch does not support this */
4704 void __weak
arch_ftrace_update_trampoline(struct ftrace_ops
*ops
)
4708 static void ftrace_update_trampoline(struct ftrace_ops
*ops
)
4710 /* Currently, only non dynamic ops can have a trampoline */
4711 if (ops
->flags
& FTRACE_OPS_FL_DYNAMIC
)
4714 arch_ftrace_update_trampoline(ops
);
4719 static struct ftrace_ops global_ops
= {
4720 .func
= ftrace_stub
,
4721 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_INITIALIZED
,
4724 static int __init
ftrace_nodyn_init(void)
4729 core_initcall(ftrace_nodyn_init
);
4731 static inline int ftrace_init_dyn_debugfs(struct dentry
*d_tracer
) { return 0; }
4732 static inline void ftrace_startup_enable(int command
) { }
4733 static inline void ftrace_startup_all(int command
) { }
4734 /* Keep as macros so we do not need to define the commands */
4735 # define ftrace_startup(ops, command) \
4737 int ___ret = __register_ftrace_function(ops); \
4739 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
4742 # define ftrace_shutdown(ops, command) \
4744 int ___ret = __unregister_ftrace_function(ops); \
4746 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
4750 # define ftrace_startup_sysctl() do { } while (0)
4751 # define ftrace_shutdown_sysctl() do { } while (0)
4754 ftrace_ops_test(struct ftrace_ops
*ops
, unsigned long ip
, void *regs
)
4759 static void ftrace_update_trampoline(struct ftrace_ops
*ops
)
4763 #endif /* CONFIG_DYNAMIC_FTRACE */
4765 __init
void ftrace_init_global_array_ops(struct trace_array
*tr
)
4767 tr
->ops
= &global_ops
;
4768 tr
->ops
->private = tr
;
4771 void ftrace_init_array_ops(struct trace_array
*tr
, ftrace_func_t func
)
4773 /* If we filter on pids, update to use the pid function */
4774 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) {
4775 if (WARN_ON(tr
->ops
->func
!= ftrace_stub
))
4776 printk("ftrace ops had %pS for function\n",
4778 /* Only the top level instance does pid tracing */
4779 if (!list_empty(&ftrace_pids
)) {
4780 set_ftrace_pid_function(func
);
4781 func
= ftrace_pid_func
;
4784 tr
->ops
->func
= func
;
4785 tr
->ops
->private = tr
;
4788 void ftrace_reset_array_ops(struct trace_array
*tr
)
4790 tr
->ops
->func
= ftrace_stub
;
4794 ftrace_ops_control_func(unsigned long ip
, unsigned long parent_ip
,
4795 struct ftrace_ops
*op
, struct pt_regs
*regs
)
4797 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT
)))
4801 * Some of the ops may be dynamically allocated,
4802 * they must be freed after a synchronize_sched().
4804 preempt_disable_notrace();
4805 trace_recursion_set(TRACE_CONTROL_BIT
);
4808 * Control funcs (perf) uses RCU. Only trace if
4809 * RCU is currently active.
4811 if (!rcu_is_watching())
4814 do_for_each_ftrace_op(op
, ftrace_control_list
) {
4815 if (!(op
->flags
& FTRACE_OPS_FL_STUB
) &&
4816 !ftrace_function_local_disabled(op
) &&
4817 ftrace_ops_test(op
, ip
, regs
))
4818 op
->func(ip
, parent_ip
, op
, regs
);
4819 } while_for_each_ftrace_op(op
);
4821 trace_recursion_clear(TRACE_CONTROL_BIT
);
4822 preempt_enable_notrace();
4825 static struct ftrace_ops control_ops
= {
4826 .func
= ftrace_ops_control_func
,
4827 .flags
= FTRACE_OPS_FL_RECURSION_SAFE
| FTRACE_OPS_FL_INITIALIZED
,
4828 INIT_OPS_HASH(control_ops
)
4832 __ftrace_ops_list_func(unsigned long ip
, unsigned long parent_ip
,
4833 struct ftrace_ops
*ignored
, struct pt_regs
*regs
)
4835 struct ftrace_ops
*op
;
4838 bit
= trace_test_and_set_recursion(TRACE_LIST_START
, TRACE_LIST_MAX
);
4843 * Some of the ops may be dynamically allocated,
4844 * they must be freed after a synchronize_sched().
4846 preempt_disable_notrace();
4847 do_for_each_ftrace_op(op
, ftrace_ops_list
) {
4848 if (ftrace_ops_test(op
, ip
, regs
)) {
4849 if (FTRACE_WARN_ON(!op
->func
)) {
4850 pr_warn("op=%p %pS\n", op
, op
);
4853 op
->func(ip
, parent_ip
, op
, regs
);
4855 } while_for_each_ftrace_op(op
);
4857 preempt_enable_notrace();
4858 trace_clear_recursion(bit
);
/*
 * Some archs only support passing ip and parent_ip. Even though
 * the list function ignores the op parameter, we do not want any
 * C side effects, where a function is called without the caller
 * sending a third parameter.
 * Archs are to support both the regs and ftrace_ops at the same time.
 * If they support ftrace_ops, it is assumed they support regs.
 * If call backs want to use regs, they must either check for regs
 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
 * An architecture can pass partial regs with ftrace_ops and still
 * set the ARCH_SUPPORTS_FTRACE_OPS.
 */
4874 #if ARCH_SUPPORTS_FTRACE_OPS
4875 static void ftrace_ops_list_func(unsigned long ip
, unsigned long parent_ip
,
4876 struct ftrace_ops
*op
, struct pt_regs
*regs
)
4878 __ftrace_ops_list_func(ip
, parent_ip
, NULL
, regs
);
4881 static void ftrace_ops_no_ops(unsigned long ip
, unsigned long parent_ip
)
4883 __ftrace_ops_list_func(ip
, parent_ip
, NULL
, NULL
);
/*
 * If there's only one function registered but it does not support
 * recursion, this function will be called by the mcount trampoline.
 * This function will handle recursion protection.
 */
4892 static void ftrace_ops_recurs_func(unsigned long ip
, unsigned long parent_ip
,
4893 struct ftrace_ops
*op
, struct pt_regs
*regs
)
4897 bit
= trace_test_and_set_recursion(TRACE_LIST_START
, TRACE_LIST_MAX
);
4901 op
->func(ip
, parent_ip
, op
, regs
);
4903 trace_clear_recursion(bit
);
/**
 * ftrace_ops_get_func - get the function a trampoline should call
 * @ops: the ops to get the function for
 *
 * Normally the mcount trampoline will call the ops->func, but there
 * are times that it should not. For example, if the ops does not
 * have its own recursion protection, then it should call the
 * ftrace_ops_recurs_func() instead.
 *
 * Returns the function that the trampoline should call for @ops.
 */
4917 ftrace_func_t
ftrace_ops_get_func(struct ftrace_ops
*ops
)
4920 * If this is a dynamic ops or we force list func,
4921 * then it needs to call the list anyway.
4923 if (ops
->flags
& FTRACE_OPS_FL_DYNAMIC
|| FTRACE_FORCE_LIST_FUNC
)
4924 return ftrace_ops_list_func
;
4927 * If the func handles its own recursion, call it directly.
4928 * Otherwise call the recursion protected function that
4929 * will call the ftrace ops function.
4931 if (!(ops
->flags
& FTRACE_OPS_FL_RECURSION_SAFE
))
4932 return ftrace_ops_recurs_func
;
4937 static void clear_ftrace_swapper(void)
4939 struct task_struct
*p
;
4943 for_each_online_cpu(cpu
) {
4945 clear_tsk_trace_trace(p
);
4950 static void set_ftrace_swapper(void)
4952 struct task_struct
*p
;
4956 for_each_online_cpu(cpu
) {
4958 set_tsk_trace_trace(p
);
4963 static void clear_ftrace_pid(struct pid
*pid
)
4965 struct task_struct
*p
;
4968 do_each_pid_task(pid
, PIDTYPE_PID
, p
) {
4969 clear_tsk_trace_trace(p
);
4970 } while_each_pid_task(pid
, PIDTYPE_PID
, p
);
4976 static void set_ftrace_pid(struct pid
*pid
)
4978 struct task_struct
*p
;
4981 do_each_pid_task(pid
, PIDTYPE_PID
, p
) {
4982 set_tsk_trace_trace(p
);
4983 } while_each_pid_task(pid
, PIDTYPE_PID
, p
);
4987 static void clear_ftrace_pid_task(struct pid
*pid
)
4989 if (pid
== ftrace_swapper_pid
)
4990 clear_ftrace_swapper();
4992 clear_ftrace_pid(pid
);
4995 static void set_ftrace_pid_task(struct pid
*pid
)
4997 if (pid
== ftrace_swapper_pid
)
4998 set_ftrace_swapper();
5000 set_ftrace_pid(pid
);
5003 static int ftrace_pid_add(int p
)
5006 struct ftrace_pid
*fpid
;
5009 mutex_lock(&ftrace_lock
);
5012 pid
= ftrace_swapper_pid
;
5014 pid
= find_get_pid(p
);
5021 list_for_each_entry(fpid
, &ftrace_pids
, list
)
5022 if (fpid
->pid
== pid
)
5027 fpid
= kmalloc(sizeof(*fpid
), GFP_KERNEL
);
5031 list_add(&fpid
->list
, &ftrace_pids
);
5034 set_ftrace_pid_task(pid
);
5036 ftrace_update_pid_func();
5038 ftrace_startup_all(0);
5040 mutex_unlock(&ftrace_lock
);
5044 if (pid
!= ftrace_swapper_pid
)
5048 mutex_unlock(&ftrace_lock
);
5052 static void ftrace_pid_reset(void)
5054 struct ftrace_pid
*fpid
, *safe
;
5056 mutex_lock(&ftrace_lock
);
5057 list_for_each_entry_safe(fpid
, safe
, &ftrace_pids
, list
) {
5058 struct pid
*pid
= fpid
->pid
;
5060 clear_ftrace_pid_task(pid
);
5062 list_del(&fpid
->list
);
5066 ftrace_update_pid_func();
5067 ftrace_startup_all(0);
5069 mutex_unlock(&ftrace_lock
);
5072 static void *fpid_start(struct seq_file
*m
, loff_t
*pos
)
5074 mutex_lock(&ftrace_lock
);
5076 if (list_empty(&ftrace_pids
) && (!*pos
))
5079 return seq_list_start(&ftrace_pids
, *pos
);
5082 static void *fpid_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
5087 return seq_list_next(v
, &ftrace_pids
, pos
);
5090 static void fpid_stop(struct seq_file
*m
, void *p
)
5092 mutex_unlock(&ftrace_lock
);
5095 static int fpid_show(struct seq_file
*m
, void *v
)
5097 const struct ftrace_pid
*fpid
= list_entry(v
, struct ftrace_pid
, list
);
5099 if (v
== (void *)1) {
5100 seq_printf(m
, "no pid\n");
5104 if (fpid
->pid
== ftrace_swapper_pid
)
5105 seq_printf(m
, "swapper tasks\n");
5107 seq_printf(m
, "%u\n", pid_vnr(fpid
->pid
));
5112 static const struct seq_operations ftrace_pid_sops
= {
5113 .start
= fpid_start
,
5120 ftrace_pid_open(struct inode
*inode
, struct file
*file
)
5124 if ((file
->f_mode
& FMODE_WRITE
) &&
5125 (file
->f_flags
& O_TRUNC
))
5128 if (file
->f_mode
& FMODE_READ
)
5129 ret
= seq_open(file
, &ftrace_pid_sops
);
5135 ftrace_pid_write(struct file
*filp
, const char __user
*ubuf
,
5136 size_t cnt
, loff_t
*ppos
)
5142 if (cnt
>= sizeof(buf
))
5145 if (copy_from_user(&buf
, ubuf
, cnt
))
5151 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5152 * to clean the filter quietly.
5154 tmp
= strstrip(buf
);
5155 if (strlen(tmp
) == 0)
5158 ret
= kstrtol(tmp
, 10, &val
);
5162 ret
= ftrace_pid_add(val
);
5164 return ret
? ret
: cnt
;
5168 ftrace_pid_release(struct inode
*inode
, struct file
*file
)
5170 if (file
->f_mode
& FMODE_READ
)
5171 seq_release(inode
, file
);
5176 static const struct file_operations ftrace_pid_fops
= {
5177 .open
= ftrace_pid_open
,
5178 .write
= ftrace_pid_write
,
5180 .llseek
= tracing_lseek
,
5181 .release
= ftrace_pid_release
,
5184 static __init
int ftrace_init_debugfs(void)
5186 struct dentry
*d_tracer
;
5188 d_tracer
= tracing_init_dentry();
5192 ftrace_init_dyn_debugfs(d_tracer
);
5194 trace_create_file("set_ftrace_pid", 0644, d_tracer
,
5195 NULL
, &ftrace_pid_fops
);
5197 ftrace_profile_debugfs(d_tracer
);
5201 fs_initcall(ftrace_init_debugfs
);
/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way. If you need to simply kill ftrace
 * from a non-atomic section, use ftrace_kill.
 */
5210 void ftrace_kill(void)
5212 ftrace_disabled
= 1;
5214 clear_ftrace_function();
/**
 * Test if ftrace is dead or not.
 */
int ftrace_is_dead(void)
{
	return ftrace_disabled;
}
/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
	int ret = -1;

	ftrace_ops_init(ops);

	mutex_lock(&ftrace_lock);

	ret = ftrace_startup(ops, 0);

	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_lock);
	ret = ftrace_shutdown(ops, 0);
	mutex_unlock(&ftrace_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_function);

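/*
 * Illustrative sketch (not part of this file's build) of how a caller
 * uses the register/unregister pair above.  The callback and ops names
 * are invented for the example; the callback signature is the
 * ftrace_func_t form used throughout this file.
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		// must only call notrace code, or it will recurse
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func = my_callback,
 *	};
 *
 *	...
 *	ret = register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 */
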
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret = -ENODEV;

	mutex_lock(&ftrace_lock);

	if (unlikely(ftrace_disabled))
		goto out;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
		goto out;

	last_ftrace_enabled = !!ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_ops_list != &ftrace_list_end)
			update_ftrace_function();

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

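/*
 * Illustrative only: this handler backs the ftrace_enabled sysctl, so a
 * write to /proc/sys/kernel/ftrace_enabled (for example
 * "sysctl kernel.ftrace_enabled=0") lands here and either restarts the
 * registered ops or points every caller at ftrace_stub.
 */
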
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE |
				   FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

static int ftrace_graph_active;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
			(trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	unsigned long flags;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
					* sizeof(struct ftrace_ret_stack),
					GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->tracing_graph_pause, 0);
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	} while_each_thread(g, t);

unlock:
	read_unlock_irqrestore(&tasklist_lock, flags);
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (trace_flags & TRACE_ITER_SLEEP_TIME)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

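/*
 * Worked example of the adjustment above (numbers are illustrative
 * only): if "next" was switched out with two entries on its return
 * stack and is scheduled back in 3ms later, each pending entry's
 * calltime is pushed forward by 3ms, so the durations eventually
 * reported for those functions do not include the time the task spent
 * off the CPU.
 */
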
/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
				sizeof(struct ftrace_ret_stack *),
				GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}

	return NOTIFY_DONE;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;

	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer needs to test the
 * function against the global ops, rather than tracing any function
 * that any ftrace_ops has registered.
 */
static void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);

out:
	mutex_unlock(&ftrace_lock);
	return ret;
}

void unregister_ftrace_graph(void)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * Function graph does not allocate the trampoline, but
	 * other global_ops do. We need to reset the ALLOC_TRAMP flag
	 * if one was used.
	 */
	global_ops.trampoline = save_global_trampoline;
	if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
		global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
#endif

 out:
	mutex_unlock(&ftrace_lock);
}

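/*
 * Illustrative sketch (not compiled here) of a graph-tracer client of
 * the register/unregister pair above.  The callback names are invented
 * for the example; the typedefs and argument order match the
 * declarations used in this file.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *trace)
 *	{
 *		return 1;	// nonzero means: trace this function
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *trace)
 *	{
 *		// data collected on the return path is available in *trace
 *	}
 *
 *	...
 *	ret = register_ftrace_graph(my_graph_return, my_graph_entry);
 *	...
 *	unregister_ftrace_graph();
 */
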
static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->tracing_graph_pause, 0);
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	/*
	 * The idle task has no parent, it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
					    * sizeof(struct ftrace_ret_stack),
					    GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
				* sizeof(struct ftrace_ret_stack),
				GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */