/*
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"
static bool kill_ftrace_graph;
/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}
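
/*
 * Illustrative example of a caller (a sketch, not taken verbatim from any
 * architecture): arch entry code typically bails out of its return-trampoline
 * setup once the tracer has been killed, along these lines:
 *
 *	void prepare_ftrace_return(...)
 *	{
 *		if (unlikely(ftrace_graph_is_dead()))
 *			return;
 *		...
 *	}
 */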
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};
#define TRACE_GRAPH_INDENT	2

static unsigned int max_depth;
static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};
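
/*
 * Illustrative usage (shell, not part of this file): each option above is
 * toggled at runtime through the trace_options file in tracefs, with a "no"
 * prefix clearing it:
 *
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	# echo funcgraph-proc > /sys/kernel/debug/tracing/trace_options
 *	# echo nofuncgraph-irqs > /sys/kernel/debug/tracing/trace_options
 */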
static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
static struct trace_array *graph_array;
/*
 * The DURATION column is also used to display IRQ signs; the following
 * values are used by print_graph_irq() and others to fill in space in
 * the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);
/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used.  To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
	 * negative index it ignores the record.  The index is recovered when
	 * returning from the filtered function by adding FTRACE_NOTRACE_DEPTH
	 * back, and then functions are recorded normally again.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased in
	 * this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
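
/*
 * Worked example of the notrace offset above (values illustrative): with
 * curr_ret_stack == 2, entering a function matched by set_graph_notrace
 * stores its frame at index 3 but leaves curr_ret_stack at
 * 3 - FTRACE_NOTRACE_DEPTH, a large negative value.  Every nested push then
 * fails the "< -1" check and returns -EBUSY, so nothing below the filtered
 * function is recorded.  On return, ftrace_return_to_handler() adds
 * FTRACE_NOTRACE_DEPTH back and tracing resumes as if the filtered call
 * had never happened.
 */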
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover index to get an original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}
static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in an enabled function, or is itself enabled. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}
void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}
static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}
static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email.
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}
#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
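
/*
 * Example: for comm "sshd" and pid 1755 the field is "sshd-1755"
 * (9 characters), so 5 padding spaces remain and are split as 2 before
 * and 3 after, centering the text in the 14-character column.
 */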
static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}
/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
	/*
	 * Context-switch trace line:
	 *
	 *  ------------------------------------------
	 *  | 1)  migration/0--1  =>  sshd-1755
	 *  ------------------------------------------
	 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
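
/*
 * Net effect (illustrative): when an entry event is immediately followed
 * by its matching return event for the same pid and func, the pair is
 * collapsed by print_graph_entry_leaf() into a single "func();" line
 * instead of a "func() {" / "}" bracket pair.
 */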
static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
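
/*
 * Example: a duration of 1234567 ns yields usecs_str "1234" and
 * nsecs_rem 567, printed as "1234.567 us " with no extra padding (len
 * reaches 8); a duration of 12345678901 ns yields the 8-digit
 * "12345678 us " with the nanosecond part skipped, since len >= 7.
 */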
static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}
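
/*
 * Example (illustrative): with funcgraph-tail set, the closing brace of
 * kmalloc() is printed with the function name appended in C comment
 * delimiters, so a return can be matched to its function even after the
 * entry line has scrolled out of view.
 */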
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* dont trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}
static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}
static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}
static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}
static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}
void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}
void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}
static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};
static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}
static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}
static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
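
/*
 * Illustrative usage of the max_graph_depth file registered below
 * (shell, not part of this file); 0 means no depth limit:
 *
 *	# echo 1 > /sys/kernel/debug/tracing/max_graph_depth
 *	# echo 0 > /sys/kernel/debug/tracing/max_graph_depth
 */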
static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);
static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);