kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
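/*
 * Typical usage, for reference (a sketch of the standard ftrace
 * debugfs interface, assuming debugfs is mounted at /sys/kernel/debug;
 * not part of this file's code):
 *
 *   echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *   echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *   ... run the workload ...
 *   cat /sys/kernel/debug/tracing/tracing_max_latency
 *   cat /sys/kernel/debug/tracing/trace
 *
 * The preemptoff and preemptirqsoff tracers below are selected the
 * same way.
 */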
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
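/* Non-zero while one of the tracers in this file is actively recording */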
static int tracer_enabled __read_mostly;

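/* Set on a CPU while that CPU is timing an irqs/preempt-off section */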
static DEFINE_PER_CPU(int, tracing_cpu);

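/* Serializes tests and updates of the recorded maximum latency */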
static DEFINE_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

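/* Saved TRACE_ITER_LATENCY_FMT state, restored when the tracer is reset */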
static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

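/* Tracer option bit: display the latency trace as a call graph */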
#define TRACE_DISPLAY_GRAPH     1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* display latency trace as call graph */
        { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        .val  = 0,
        .opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, in which case data->disabled
 * has been incremented.
 * Returns 0 if the trace is to be ignored, in which case
 * data->disabled is left unchanged.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = tr->data[cpu];
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
        int cpu;

        if (!(bit & TRACE_DISPLAY_GRAPH))
                return -EINVAL;

        if (!(is_graph() ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tracing_max_latency = 0;
        tracing_reset_online_cpus(irqsoff_trace);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);
}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
        return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}
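
/*
 * In other words: if a threshold is set, every section at least that
 * long is reported; otherwise only a new maximum is. E.g. with
 * tracing_thresh == 0 and tracing_max_latency == 500, a delta of 400
 * is dropped and a delta of 600 is recorded.
 */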

static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1 - T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tracing_max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

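/*
 * Called when irqs/preemption are enabled again: closes the section
 * opened by start_critical_timing() on this CPU and lets
 * check_critical_timing() decide whether it set a new maximum.
 */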
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suspend the measurement (e.g. while in idle) */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret = 0;

        if (!graph)
                ret = register_ftrace_function(&trace_ops);
        else
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        if (!graph)
                unregister_ftrace_function(&trace_ops);
        else
                unregister_ftrace_graph();
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
        save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
        trace_flags |= TRACE_ITER_LATENCY_FMT;

        tracing_max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(tr);

        if (start_irqsoff_tracer(tr, is_graph()))
                printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        stop_irqsoff_tracer(tr, is_graph());

        if (!save_lat_flag)
                trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);