/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>

#include "trace.h"
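
/*
 * Usage sketch (assuming debugfs/tracefs is mounted at /sys/kernel/debug;
 * paths vary by distribution):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	# echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *	... run a real-time workload ...
 *	# cat /sys/kernel/debug/tracing/tracing_max_latency
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * The tracers below record the worst-case delay between the wakeup of
 * the highest-priority task of interest and the moment it actually gets
 * onto a CPU.
 */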

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
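
/*
 * wakeup_lock is a raw arch_spinlock_t (as in the other latency tracers)
 * rather than a normal spinlock: it is taken from scheduler tracepoints
 * with interrupts disabled, where the lockdep/debug hooks of a regular
 * spinlock could themselves recurse into tracing.
 */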

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_flags;
static bool function_enabled;

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
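
/*
 * Example (paths assumed as above): once a wakeup tracer is current, the
 * call-graph view can be toggled from user space with
 *
 *	# echo display-graph > /sys/kernel/debug/tracing/trace_options
 *	# echo nodisplay-graph > /sys/kernel/debug/tracing/trace_options
 */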

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
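
/*
 * On a return of 1 the caller owns one reference on (*data)->disabled
 * and runs with preemption off; it must undo both, as the callers below
 * do:
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */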

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
#endif /* CONFIG_FUNCTION_TRACER */
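
/*
 * Note: wakeup_tracer_call() is what __wakeup_tracer_init() below hands
 * to ftrace_init_array_ops(), so it runs on (nearly) every traced
 * function entry while the tracer is active; hence the cheap early-out
 * prologue above.
 */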

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static void wakeup_function_set(struct trace_array *tr, int set)
{
	if (set)
		register_wakeup_function(tr, is_graph(), 1);
	else
		unregister_wakeup_function(tr, is_graph());
}

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (mask & TRACE_ITER_FUNCTION)
		wakeup_function_set(tr, set);

	return trace_keep_overwrite(tracer, mask, set);
}

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int
wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int
wakeup_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
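
/*
 * Two reporting modes, chosen by tracing_thresh:
 *  - threshold mode: a nonzero tracing_thresh (e.g. set via
 *	# echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *    for 100 usecs) reports every latency at or above the threshold;
 *  - max mode (tracing_thresh == 0): only a new maximum is reported.
 */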
/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tr->max_latency)
			return 0;
	}
	return 1;
}

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid		= prev->pid;
	entry->prev_prio	= prev->prio;
	entry->prev_state	= prev->state;
	entry->next_pid		= next->pid;
	entry->next_prio	= next->prio;
	entry->next_state	= next->state;
	entry->next_cpu		= task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
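
/*
 * Wakeup events reuse struct ctx_switch_entry: in a TRACE_WAKE record,
 * "prev" holds the waker (curr) and "next" holds the wakee.
 */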
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->prev_pid		= curr->pid;
	entry->prev_prio	= curr->prio;
	entry->prev_state	= curr->state;
	entry->next_pid		= wakee->pid;
	entry->next_prio	= wakee->prio;
	entry->next_state	= wakee->state;
	entry->next_cpu		= task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}

static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
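
/*
 * Reminder for the priority checks below: kernel priorities are
 * inverted, a lower p->prio value means a higher priority. So
 * "p->prio >= wakeup_prio" rejects any wakee that is not strictly
 * higher-priority than the task already being traced.
 */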
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Semantic is like this:
	 *  - wakeup tracer handles all tasks in the system, independently
	 *    from their scheduling class;
	 *  - wakeup_rt tracer handles tasks belonging to sched_dl and
	 *    sched_rt class;
	 *  - wakeup_dl handles tasks belonging to sched_dl class only.
	 */
	if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
	    (wakeup_rt && !dl_task(p) && !rt_task(p)) ||
	    (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(tr, is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(tr, is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}
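
/*
 * The wakeup tracers share global state (the probes above, wakeup_task,
 * wakeup_lock, ...), so only one trace instance may run one of them at a
 * time; wakeup_busy enforces that in the init functions below.
 */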
static bool wakeup_busy;

static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tr->max_latency = 0;
	wakeup_trace = tr;
	ftrace_init_array_ops(tr, wakeup_tracer_call);
	start_wakeup_tracer(tr);

	wakeup_busy = true;
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 0;
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static int wakeup_dl_tracer_init(struct trace_array *tr)
{
	if (wakeup_busy)
		return -EBUSY;

	wakeup_dl = 1;
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
	ftrace_reset_array_ops(tr);
	wakeup_busy = false;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.allow_instances = true,
	.use_max_tr	= true,
};

static struct tracer wakeup_dl_tracer __read_mostly =
{
	.name		= "wakeup_dl",
	.init		= wakeup_dl_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_dl_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);
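
/*
 * core_initcall() registers the tracers early in boot, so "wakeup",
 * "wakeup_rt" and "wakeup_dl" appear in available_tracers as soon as
 * the tracing filesystem is mounted.
 */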