/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};
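
/*
 * The three tracers below select what is timed through this bitmask:
 * "irqsoff" sets TRACER_IRQS_OFF, "preemptoff" sets TRACER_PREEMPT_OFF,
 * and "preemptirqsoff" sets both (see the *_tracer_init() functions
 * at the bottom of this file).
 */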

static int trace_type __read_mostly;

static int save_lat_flag;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
	return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
	return ((trace_type & TRACER_IRQS_OFF) &&
		irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */
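
/*
 * trace_ops is attached with register_ftrace_function() in
 * start_irqsoff_tracer() below, so irqsoff_tracer_call() runs for
 * every traced kernel function while the tracer is active - hence
 * the cheap early-exit checks above.
 */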

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
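
/*
 * Example: with tracing_thresh unset (0), a delta is recorded only if
 * it beats the current tracing_max_latency, so the trace always holds
 * the single worst latency seen. With tracing_thresh set, every
 * critical section at or above the threshold is reported, regardless
 * of any previous maximum.
 */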

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1-T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}
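
/*
 * Lifecycle sketch: start_critical_timing() stamps
 * data->preempt_timestamp and flags the CPU in tracing_cpu;
 * stop_critical_timing() clears the flag and hands the interval to
 * check_critical_timing(), which computes the delta from ftrace_now()
 * and records a new maximum via update_max_tr_single() when
 * report_latency() accepts it.
 */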

/* start and stop critical timings, used to exclude idle from the measurement */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
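
/*
 * Note: trace_preempt_on/off are called from the preempt_count()
 * bookkeeping (add_preempt_count()/sub_preempt_count() in this
 * kernel's scheduler code), so every preemption-disabled region is
 * a candidate critical section.
 */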

static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);
	start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
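
/*
 * Usage sketch (userspace, assuming debugfs is mounted at
 * /sys/kernel/debug and CONFIG_IRQSOFF_TRACER is enabled):
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	<run the workload to be measured>
 *	cat /sys/kernel/debug/tracing/tracing_max_latency
 *	cat /sys/kernel/debug/tracing/trace
 *
 * "preemptoff" and "preemptirqsoff" are selected the same way when
 * their config options are built in.
 */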