tracing/latency: Fix header output for latency tracers
kernel/trace/trace_sched_wakeup.c

/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_lat_flag;

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue; in that case preemption has
 * been disabled and data->disabled has been incremented.
 * Returns 0 if the trace is to be ignored; preemption is left
 * enabled and data->disabled is unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */

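/*
 * Register either the plain function tracer or the function graph
 * tracer, depending on the display-graph option. tracer_enabled is
 * only set once registration succeeds and tracing is enabled.
 */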
static int start_func_tracer(int graph)
{
	int ret;

	if (!graph)
		ret = register_ftrace_function(&trace_ops);
	else
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(int graph)
{
	tracer_enabled = 0;

	if (!graph)
		unregister_ftrace_function(&trace_ops);
	else
		unregister_ftrace_graph();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
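/*
 * Toggling the display-graph option switches between the function
 * tracer and the function graph tracer, so the currently running
 * tracer is stopped, the trace reset, and the other one started.
 */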
static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(!set);

	wakeup_reset(wakeup_trace);
	tracing_max_latency = 0;

	return start_func_tracer(set);
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

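/*
 * In graph mode the graph tracer prints its own headers; otherwise
 * fall back to the default (latency-format aware) header.
 */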
static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

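/*
 * Without the graph tracer, header output depends on whether the
 * function tracer is available: with it, the default header applies;
 * without it, only the bare latency header can be printed.
 */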
#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

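/*
 * Keep wakeup_current_cpu up to date when the traced task migrates,
 * so the function tracers above only record events on the CPU the
 * task is actually running on.
 */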
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

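/*
 * Called on every context switch. When the task we are waiting for is
 * scheduled in, compute the wakeup-to-schedule latency and, if it is a
 * new maximum, snapshot the trace via update_max_tr().
 */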
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

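/*
 * Clear the current wakeup target. Callers must hold wakeup_lock;
 * wakeup_reset() below takes it, and the probe functions call this
 * with the lock already held.
 */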
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(tr);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

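/*
 * Called from the sched_wakeup tracepoints. If the woken task has a
 * higher priority than both the current wakeup target and the current
 * task (and is an RT task when tracing wakeup_rt), make it the new
 * target and timestamp the start of the latency measurement.
 */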
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
	    p->prio >= wakeup_prio ||
	    p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}

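/*
 * Register probes on the sched_wakeup, sched_wakeup_new, sched_switch
 * and sched_migrate_task tracepoints, then start the function tracer.
 */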
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

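/*
 * Common init for the "wakeup" and "wakeup_rt" tracers: force the
 * latency output format (remembering the old setting so the reset
 * path can restore it) and arm the probes.
 */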
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= 1,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= 1,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= 1,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= 1,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
device_initcall(init_wakeup_tracer);