Commit | Line | Data |
---|---|---|
352ad25a SR |
1 | /* |
2 | * trace task wakeup timings | |
3 | * | |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | |
5 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | |
6 | * | |
7 | * Based on code from the latency_tracer, that is: | |
8 | * | |
9 | * Copyright (C) 2004-2006 Ingo Molnar | |
6d49e352 | 10 | * Copyright (C) 2004 Nadia Yvette Chambers |
352ad25a SR |
11 | */ |
12 | #include <linux/module.h> | |
13 | #include <linux/fs.h> | |
14 | #include <linux/debugfs.h> | |
15 | #include <linux/kallsyms.h> | |
16 | #include <linux/uaccess.h> | |
17 | #include <linux/ftrace.h> | |
8bd75c77 | 18 | #include <linux/sched/rt.h> |
ad8d75ff | 19 | #include <trace/events/sched.h> |
352ad25a SR |
20 | #include "trace.h" |
21 | ||
/* The trace_array being written to by the wakeup tracers. */
static struct trace_array *wakeup_trace;
/* Non-zero while a wakeup trace is actively being recorded. */
static int __read_mostly tracer_enabled;

/* Task whose wakeup latency is currently being measured. */
static struct task_struct *wakeup_task;
/* CPU the wakeup_task was woken on (start of the latency window). */
static int wakeup_cpu;
/* CPU the wakeup_task is currently on (updated on migration). */
static int wakeup_current_cpu;
/* Priority of wakeup_task; (unsigned)-1 means "no task traced". */
static unsigned wakeup_prio = -1;
/* 1 when the "wakeup_rt" flavour is active (trace RT tasks only). */
static int wakeup_rt;

/* Serializes updates of the wakeup_* state above across CPUs. */
static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);
static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

/* Snapshot of trace_flags taken at init; restored on tracer reset. */
static int save_flags;

#define TRACE_DISPLAY_GRAPH 1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	/* display latency trace as call graph */
	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	.val = 0,
	.opts = trace_opts,
};

/* Non-zero when the display-graph tracer option is set. */
#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
606576ce | 58 | #ifdef CONFIG_FUNCTION_TRACER |
542181d3 | 59 | |
/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	/* Nothing to trace unless a wakeup is currently in flight. */
	if (likely(!wakeup_task))
		return 0;

	/* Record the preempt count *before* disabling preemption. */
	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	/* Only trace on the CPU the traced task is currently running on. */
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = tr->data[cpu];
	disabled = atomic_inc_return(&(*data)->disabled);
	/* disabled != 1 means tracing on this CPU is off or we recursed. */
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
106 | ||
/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	/* Record the function entry with interrupts off. */
	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	/* Undo what func_prolog_preempt_disable() did. */
	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}
129 | ||
/* ftrace ops installed while the wakeup function tracer is active. */
static struct ftrace_ops trace_ops __read_mostly =
{
	.func = wakeup_tracer_call,
	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};
7e40798f | 135 | #endif /* CONFIG_FUNCTION_TRACER */ |
7495a5be JO |
136 | |
137 | static int start_func_tracer(int graph) | |
138 | { | |
139 | int ret; | |
140 | ||
141 | if (!graph) | |
142 | ret = register_ftrace_function(&trace_ops); | |
143 | else | |
144 | ret = register_ftrace_graph(&wakeup_graph_return, | |
145 | &wakeup_graph_entry); | |
146 | ||
147 | if (!ret && tracing_is_enabled()) | |
148 | tracer_enabled = 1; | |
149 | else | |
150 | tracer_enabled = 0; | |
151 | ||
152 | return ret; | |
153 | } | |
154 | ||
155 | static void stop_func_tracer(int graph) | |
156 | { | |
157 | tracer_enabled = 0; | |
158 | ||
159 | if (!graph) | |
160 | unregister_ftrace_function(&trace_ops); | |
161 | else | |
162 | unregister_ftrace_graph(); | |
163 | } | |
164 | ||
7495a5be JO |
165 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
166 | static int wakeup_set_flag(u32 old_flags, u32 bit, int set) | |
167 | { | |
168 | ||
169 | if (!(bit & TRACE_DISPLAY_GRAPH)) | |
170 | return -EINVAL; | |
171 | ||
172 | if (!(is_graph() ^ set)) | |
173 | return 0; | |
174 | ||
175 | stop_func_tracer(!set); | |
176 | ||
177 | wakeup_reset(wakeup_trace); | |
178 | tracing_max_latency = 0; | |
179 | ||
180 | return start_func_tracer(set); | |
181 | } | |
182 | ||
/* Function-graph entry hook: log the call while tracing wakeup_task. */
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	/* Undo what func_prolog_preempt_disable() did. */
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}
200 | ||
/* Function-graph return hook: log the return while tracing wakeup_task. */
static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	/* Undo what func_prolog_preempt_disable() did. */
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
	return;
}
218 | ||
/* Set up graph-tracer iterator state when in display-graph mode. */
static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (!is_graph())
		return;

	graph_trace_open(iter);
}
224 | ||
225 | static void wakeup_trace_close(struct trace_iterator *iter) | |
226 | { | |
227 | if (iter->private) | |
228 | graph_trace_close(iter); | |
229 | } | |
230 | ||
321e68b0 JO |
/* Output options used when rendering the latency trace as a call graph. */
#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)
7495a5be JO |
234 | |
235 | static enum print_line_t wakeup_print_line(struct trace_iterator *iter) | |
236 | { | |
237 | /* | |
238 | * In graph mode call the graph tracer output function, | |
239 | * otherwise go with the TRACE_FN event handler | |
240 | */ | |
241 | if (is_graph()) | |
242 | return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS); | |
243 | ||
244 | return TRACE_TYPE_UNHANDLED; | |
245 | } | |
246 | ||
247 | static void wakeup_print_header(struct seq_file *s) | |
248 | { | |
249 | if (is_graph()) | |
250 | print_graph_headers_flags(s, GRAPH_TRACER_FLAGS); | |
251 | else | |
252 | trace_default_header(s); | |
253 | } | |
254 | ||
/* Record a function event via the graph or plain function tracer. */
static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (!is_graph())
		trace_function(tr, ip, parent_ip, flags, pc);
	else
		trace_graph_function(tr, ip, parent_ip, flags, pc);
}
265 | #else | |
#define __trace_function trace_function

/* Without the graph tracer there is no display-graph option to toggle. */
static int wakeup_set_flag(u32 old_flags, u32 bit, int set)
{
	return -EINVAL;
}

/* Stub: graph tracing unavailable, refuse to trace this entry. */
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}

/* Stub: let the generic TRACE_FN handler format every entry. */
static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
7495a5be JO |
298 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
299 | ||
352ad25a SR |
300 | /* |
301 | * Should this new latency be reported/recorded? | |
302 | */ | |
e309b41d | 303 | static int report_latency(cycle_t delta) |
352ad25a SR |
304 | { |
305 | if (tracing_thresh) { | |
306 | if (delta < tracing_thresh) | |
307 | return 0; | |
308 | } else { | |
309 | if (delta <= tracing_max_latency) | |
310 | return 0; | |
311 | } | |
312 | return 1; | |
313 | } | |
314 | ||
38516ab5 SR |
315 | static void |
316 | probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) | |
478142c3 SR |
317 | { |
318 | if (task != wakeup_task) | |
319 | return; | |
320 | ||
321 | wakeup_current_cpu = cpu; | |
322 | } | |
323 | ||
/*
 * sched_switch probe: when the task we are waiting on is finally
 * scheduled in, compute the wakeup latency and record it if it is
 * worth reporting (see report_latency()).
 */
static void notrace
probe_wakeup_sched_switch(void *ignore,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (likely(disabled != 1))
		goto out;

	/* IRQs off: wakeup_lock is a raw arch spinlock. */
	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = wakeup_trace->data[wakeup_cpu];

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	/* Latency = now minus the timestamp taken at wakeup time. */
	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	if (!report_latency(delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	/* One measurement done: re-arm for the next wakeup. */
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
392 | ||
e309b41d | 393 | static void __wakeup_reset(struct trace_array *tr) |
352ad25a | 394 | { |
352ad25a SR |
395 | wakeup_cpu = -1; |
396 | wakeup_prio = -1; | |
397 | ||
398 | if (wakeup_task) | |
399 | put_task_struct(wakeup_task); | |
400 | ||
401 | wakeup_task = NULL; | |
402 | } | |
403 | ||
/* Reset the ring buffers and the wakeup-trace state, with locking. */
static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(tr);

	/* IRQs off: wakeup_lock is a raw arch spinlock. */
	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
416 | ||
/*
 * sched_wakeup/sched_wakeup_new probe: if the woken task outranks both
 * the task we are already tracing and current, make it the new
 * wakeup_task and timestamp the start of the latency window.
 */
static void
probe_wakeup(void *ignore, struct task_struct *p, int success)
{
	struct trace_array_cpu *data;
	int cpu = smp_processor_id();
	unsigned long flags;
	long disabled;
	int pc;

	if (likely(!tracer_enabled))
		return;

	tracing_record_cmdline(p);
	tracing_record_cmdline(current);

	/*
	 * Lower prio value means higher priority: the wakeup is only
	 * interesting if p beats both the traced task and current.
	 * In wakeup_rt mode, non-RT tasks are never candidates.
	 */
	if ((wakeup_rt && !rt_task(p)) ||
			p->prio >= wakeup_prio ||
			p->prio >= current->prio)
		return;

	pc = preempt_count();
	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || p->prio >= wakeup_prio)
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/* Hold a reference until the switch-in (or a reset) drops it. */
	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = wakeup_trace->data[wakeup_cpu];
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (where as schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&wakeup_trace->data[cpu]->disabled);
}
477 | ||
e309b41d | 478 | static void start_wakeup_tracer(struct trace_array *tr) |
352ad25a | 479 | { |
5b82a1b0 MD |
480 | int ret; |
481 | ||
38516ab5 | 482 | ret = register_trace_sched_wakeup(probe_wakeup, NULL); |
5b82a1b0 | 483 | if (ret) { |
b07c3f19 | 484 | pr_info("wakeup trace: Couldn't activate tracepoint" |
5b82a1b0 MD |
485 | " probe to kernel_sched_wakeup\n"); |
486 | return; | |
487 | } | |
488 | ||
38516ab5 | 489 | ret = register_trace_sched_wakeup_new(probe_wakeup, NULL); |
5b82a1b0 | 490 | if (ret) { |
b07c3f19 | 491 | pr_info("wakeup trace: Couldn't activate tracepoint" |
5b82a1b0 MD |
492 | " probe to kernel_sched_wakeup_new\n"); |
493 | goto fail_deprobe; | |
494 | } | |
495 | ||
38516ab5 | 496 | ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL); |
5b82a1b0 | 497 | if (ret) { |
b07c3f19 | 498 | pr_info("sched trace: Couldn't activate tracepoint" |
73d8b8bc | 499 | " probe to kernel_sched_switch\n"); |
5b82a1b0 MD |
500 | goto fail_deprobe_wake_new; |
501 | } | |
502 | ||
38516ab5 | 503 | ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL); |
478142c3 SR |
504 | if (ret) { |
505 | pr_info("wakeup trace: Couldn't activate tracepoint" | |
506 | " probe to kernel_sched_migrate_task\n"); | |
507 | return; | |
508 | } | |
509 | ||
352ad25a SR |
510 | wakeup_reset(tr); |
511 | ||
512 | /* | |
513 | * Don't let the tracer_enabled = 1 show up before | |
514 | * the wakeup_task is reset. This may be overkill since | |
515 | * wakeup_reset does a spin_unlock after setting the | |
516 | * wakeup_task to NULL, but I want to be safe. | |
517 | * This is a slow path anyway. | |
518 | */ | |
519 | smp_wmb(); | |
520 | ||
7495a5be JO |
521 | if (start_func_tracer(is_graph())) |
522 | printk(KERN_ERR "failed to start wakeup tracer\n"); | |
ad591240 | 523 | |
352ad25a | 524 | return; |
5b82a1b0 | 525 | fail_deprobe_wake_new: |
38516ab5 | 526 | unregister_trace_sched_wakeup_new(probe_wakeup, NULL); |
5b82a1b0 | 527 | fail_deprobe: |
38516ab5 | 528 | unregister_trace_sched_wakeup(probe_wakeup, NULL); |
352ad25a SR |
529 | } |
530 | ||
/* Disable recording and unhook every scheduler tracepoint probe. */
static void stop_wakeup_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	stop_func_tracer(is_graph());
	unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_wakeup, NULL);
	unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
}
540 | ||
/*
 * Common init for both wakeup tracer flavours: force the latency
 * output format and ring-buffer overwrite mode (saving the previous
 * flags for restoration at reset time), then arm the probes.
 */
static int __wakeup_tracer_init(struct trace_array *tr)
{
	save_flags = trace_flags;

	/* non overwrite screws up the latency tracers */
	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);

	tracing_max_latency = 0;
	wakeup_trace = tr;
	start_wakeup_tracer(tr);
	return 0;
}
554 | ||
/* "wakeup" flavour: trace wakeups of any sufficiently high-prio task. */
static int wakeup_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 0;
	return __wakeup_tracer_init(tr);
}

/* "wakeup_rt" flavour: only real-time tasks are candidates. */
static int wakeup_rt_tracer_init(struct trace_array *tr)
{
	wakeup_rt = 1;
	return __wakeup_tracer_init(tr);
}
566 | ||
/* Tear the tracer down and restore the trace flags saved at init. */
static void wakeup_tracer_reset(struct trace_array *tr)
{
	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;

	stop_wakeup_tracer(tr);
	/* make sure we put back any tasks we are tracing */
	wakeup_reset(tr);

	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
}
579 | ||
/* Re-arm tracing after a tracing_on toggle. */
static void wakeup_tracer_start(struct trace_array *tr)
{
	wakeup_reset(tr);
	tracer_enabled = 1;
}

/* Stop recording; probes remain registered until reset. */
static void wakeup_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}
590 | ||
/* Latency tracer for wakeups of any (sufficiently high-prio) task. */
static struct tracer wakeup_tracer __read_mostly =
{
	.name = "wakeup",
	.init = wakeup_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flags = &tracer_flags,
	.set_flag = wakeup_set_flag,
	.flag_changed = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.use_max_tr = true,
};
611 | ||
/* Latency tracer restricted to real-time task wakeups. */
static struct tracer wakeup_rt_tracer __read_mostly =
{
	.name = "wakeup_rt",
	.init = wakeup_rt_tracer_init,
	.reset = wakeup_tracer_reset,
	.start = wakeup_tracer_start,
	.stop = wakeup_tracer_stop,
	.wait_pipe = poll_wait_pipe,
	.print_max = true,
	.print_header = wakeup_print_header,
	.print_line = wakeup_print_line,
	.flags = &tracer_flags,
	.set_flag = wakeup_set_flag,
	.flag_changed = trace_keep_overwrite,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest = trace_selftest_startup_wakeup,
#endif
	.open = wakeup_trace_open,
	.close = wakeup_trace_close,
	.use_max_tr = true,
};
633 | ||
352ad25a SR |
634 | __init static int init_wakeup_tracer(void) |
635 | { | |
636 | int ret; | |
637 | ||
638 | ret = register_tracer(&wakeup_tracer); | |
639 | if (ret) | |
640 | return ret; | |
641 | ||
3244351c SR |
642 | ret = register_tracer(&wakeup_rt_tracer); |
643 | if (ret) | |
644 | return ret; | |
645 | ||
352ad25a SR |
646 | return 0; |
647 | } | |
6f415672 | 648 | core_initcall(init_wakeup_tracer); |