/*
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the
 * following values are used by print_graph_irq() and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * curr_ret_stack is an index into the ftrace return stack of the
	 * current task.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used.  To support filtering
	 * out specific functions, the index is made negative by
	 * subtracting a huge value (FTRACE_NOTRACE_DEPTH), so that when
	 * ftrace sees a negative index it ignores the record.  The index
	 * is recovered when returning from the filtered function by
	 * adding FTRACE_NOTRACE_DEPTH back, after which functions are
	 * recorded normally again.
	 *
	 * curr_ret_stack is initialized to -1 and gets increased in
	 * this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt comes in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
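
/*
 * Write a single function-entry record into the ring buffer, running it
 * through the event filter before the commit.
 */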
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}
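
/* Nonzero if funcgraph-irqs is off and we are currently inside an irq. */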
static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function, or is one itself. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
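
/*
 * Entry callback used when tracing_thresh is set: entries are not
 * recorded here; only returns that exceed the threshold get traced.
 */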
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}
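
/* Emit a matched entry/return pair with zero duration for a single address. */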
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}
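
/* Only record the return if the function ran longer than tracing_thresh. */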
void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return TRACE_TYPE_HANDLED;
}
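
/*
 * Print the latency-format fields (irqs-off, need-resched,
 * hardirq/softirq, preempt-depth), preceded by a space.
 */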
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return TRACE_TYPE_PARTIAL_LINE;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
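
/*
 * Look ahead in the ring buffer: if the very next event is the return of
 * the same function for the same pid, this entry is a leaf call and the
 * matching return entry is given back; otherwise NULL.
 */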
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
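
/* Print the absolute timestamp as seconds.microseconds. */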
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
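
/*
 * Format a nanosecond duration as "123.456 us", padded to fit the
 * DURATION column.
 */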
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
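
/* Print one function-entry event, either as a leaf ("func();") or as an open brace. */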
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};
= {
1497 .name
= "function_graph",
1498 .open
= graph_trace_open
,
1499 .pipe_open
= graph_trace_open
,
1500 .close
= graph_trace_close
,
1501 .pipe_close
= graph_trace_close
,
1502 .init
= graph_trace_init
,
1503 .reset
= graph_trace_reset
,
1504 .print_line
= print_graph_function
,
1505 .print_header
= print_graph_headers
,
1506 .flags
= &tracer_flags
,
1507 .set_flag
= func_graph_set_flag
,
1508 #ifdef CONFIG_FTRACE_SELFTEST
1509 .selftest
= trace_selftest_startup_function_graph
,
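
/* "max_graph_depth" handlers: a depth of 0 means no limit is enforced. */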
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
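
/* Create the "max_graph_depth" control file in the tracing directory. */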
static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);