/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
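
/*
 * Usage sketch (standard ftrace interface, assuming debugfs is
 * mounted at /sys/kernel/debug):
 *
 *   echo wakeup_rt > /sys/kernel/debug/tracing/current_tracer
 *   cat /sys/kernel/debug/tracing/tracing_max_latency
 */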
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static int save_flags;
static bool function_enabled;

#define TRACE_DISPLAY_GRAPH	1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val  = 0,
	.opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

#ifdef CONFIG_FUNCTION_TRACER

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue and trace; in that case preemption
 * has been disabled and data->disabled has been incremented.
 * Returns 0 if the trace is to be ignored; in that case preemption is
 * left enabled and data->disabled is unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
#endif /* CONFIG_FUNCTION_TRACER */

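/*
 * Hook the wakeup tracer into ftrace: register either the function
 * graph tracer or the plain function tracer, depending on 'graph'.
 */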
static int register_wakeup_function(int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(&trace_ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

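/* Undo register_wakeup_function() for whichever tracer is active. */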
static void unregister_wakeup_function(int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(&trace_ops);

	function_enabled = false;
}

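/* React to the "function" trace option being flipped at run time. */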
static void wakeup_function_set(int set)
{
	if (set)
		register_wakeup_function(is_graph(), 1);
	else
		unregister_wakeup_function(is_graph());
}

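/*
 * Called when a trace option changes while this tracer is active;
 * keeps function tracing and the overwrite mode in sync with the flags.
 */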
static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set)
{
	if (mask & TRACE_ITER_FUNCTION)
		wakeup_function_set(set);

	return trace_keep_overwrite(tracer, mask, set);
}

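/*
 * Start/stop the function (or function graph) tracer on behalf of the
 * wakeup tracer; tracer_enabled gates the sched probes below.
 */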
static int start_func_tracer(int graph)
{
	int ret;

	ret = register_wakeup_function(graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Handle the "display-graph" tracer option: restart tracing in the
 * newly requested mode and reset the recorded max latency.
 */
static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	if (!(bit & TRACE_DISPLAY_GRAPH))
		return -EINVAL;

	if (!(is_graph() ^ set))
		return 0;

	stop_func_tracer(!set);

	wakeup_reset(wakeup_trace);
	tracing_max_latency = 0;

	return start_func_tracer(set);
}

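/*
 * Graph-tracer entry/return hooks; these only record events while a
 * wakeup measurement is in flight on the current CPU (see
 * func_prolog_preempt_disable() above).
 */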
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph())
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler
	 */
	if (is_graph())
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph())
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph())
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}

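/* Follow the traced task across CPUs, so we trace only its CPU. */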
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}

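/*
 * sched_switch probe: fires on every context switch. When the task we
 * are waiting for is finally scheduled in, compute the wakeup latency
 * and record a new max if it qualifies.
 */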
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

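/*
 * Drop the reference to the traced task and rearm the tracer for a
 * new candidate; callers must hold wakeup_lock.
 */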
static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}

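/*
 * sched_wakeup probe: if the woken task outranks both the current task
 * and any wakeup we are already timing, start a new measurement here.
 */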
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

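/*
 * Attach the scheduler tracepoint probes, then start the function
 * (or function graph) tracer.
 */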
static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return;
	}

	ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_migrate_task\n");
		return;
	}

	wakeup_reset(tr);

	/*
	 * Don't let the tracer_enabled = 1 show up before
	 * the wakeup_task is reset. This may be overkill since
	 * wakeup_reset does a spin_unlock after setting the
	 * wakeup_task to NULL, but I want to be safe.
	 * This is a slow path anyway.
	 */
	smp_wmb();

	if (start_func_tracer(is_graph()))
		printk(KERN_ERR "failed to start wakeup tracer\n");

	return;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
}

static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}

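/*
 * Common init for both wakeup tracers: force overwrite and latency
 * format (saving the old flags), then start tracing.
 */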
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}

static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}

static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
}

static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

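/*
 * Tracer definitions: "wakeup" measures the wakeup latency of the
 * highest-priority task in the system, "wakeup_rt" restricts that to
 * real-time tasks.
 */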
static struct tracer wakeup_tracer __read_mostly =
{
	.name		= "wakeup",
	.init		= wakeup_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name		= "wakeup_rt",
	.init		= wakeup_rt_tracer_init,
	.reset		= wakeup_tracer_reset,
	.start		= wakeup_tracer_start,
	.stop		= wakeup_tracer_stop,
	.wait_pipe	= poll_wait_pipe,
	.print_max	= true,
	.print_header	= wakeup_print_header,
	.print_line	= wakeup_print_line,
	.flags		= &tracer_flags,
	.set_flag	= wakeup_set_flag,
	.flag_changed	= wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_wakeup,
#endif
	.open		= wakeup_trace_open,
	.close		= wakeup_trace_close,
	.use_max_tr	= true,
};

__init static int init_wakeup_tracer(void)
{
	int ret;

	ret = register_tracer(&wakeup_tracer);
	if (ret)
		return ret;

	ret = register_tracer(&wakeup_rt_tracer);
	if (ret)
		return ret;

	return 0;
}
core_initcall(init_wakeup_tracer);