kernel/trace/trace_irqsoff.c
/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
	TRACER_IRQS_OFF		= (1 << 1),
	TRACER_PREEMPT_OFF	= (1 << 2),
};
32
33 static int trace_type __read_mostly;
34
35 static int save_lat_flag;
36
37 #ifdef CONFIG_PREEMPT_TRACER
38 static inline int
39 preempt_trace(void)
40 {
41 return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
42 }
43 #else
44 # define preempt_trace() (0)
45 #endif
46
47 #ifdef CONFIG_IRQSOFF_TRACER
48 static inline int
49 irq_trace(void)
50 {
51 return ((trace_type & TRACER_IRQS_OFF) &&
52 irqs_disabled());
53 }
54 #else
55 # define irq_trace() (0)
56 #endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;
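
/*
 * Illustration (not in the original source): if CPU 0 records a new
 * maximum and bumps max_sequence while CPU 1 is mid-measurement, CPU 1's
 * data->critical_sequence no longer matches max_sequence, so its result
 * is discarded in check_critical_timing() rather than reported as a
 * second, possibly distorted, maximum.
 */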

#ifdef CONFIG_FUNCTION_TRACER
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	/*
	 * Does not matter if we preempt. We test the flags
	 * afterward, to see if irqs are disabled or not.
	 * If we preempt and get a false positive, the flags
	 * test will fail.
	 */
	cpu = raw_smp_processor_id();
	if (likely(!per_cpu(tracing_cpu, cpu)))
		return;

	local_save_flags(flags);
	/* slight chance to get a false positive on tracing_cpu */
	if (!irqs_disabled_flags(flags))
		return;

	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1))
		trace_function(tr, ip, parent_ip, flags, preempt_count());

	atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = irqsoff_tracer_call,
};
#endif /* CONFIG_FUNCTION_TRACER */
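
/*
 * trace_ops is registered by start_irqsoff_tracer() below; from then on
 * irqsoff_tracer_call() runs for every traced kernel function. The cheap
 * per-cpu tracing_cpu test at its top keeps the overhead near zero on
 * CPUs that are not currently inside an irqs-off critical section.
 */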

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return 0;
	} else {
		if (delta <= tracing_max_latency)
			return 0;
	}
	return 1;
}
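
/*
 * Worked example (values in whatever units ftrace_now() reports): with
 * tracing_thresh unset and tracing_max_latency currently 500, a critical
 * section of 400 is dropped while one of 600 is recorded and becomes the
 * new maximum. With tracing_thresh set, every section longer than the
 * threshold is recorded, regardless of the current maximum.
 */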

static void
check_critical_timing(struct trace_array *tr,
		      struct trace_array_cpu *data,
		      unsigned long parent_ip,
		      int cpu)
{
	cycle_t T0, T1, delta;
	unsigned long flags;
	int pc;

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	local_save_flags(flags);

	pc = preempt_count();

	if (!report_latency(delta))
		goto out;

	spin_lock_irqsave(&max_trace_lock, flags);

	/* check if we are still the max latency */
	if (!report_latency(delta))
		goto out_unlock;

	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);

	if (data->critical_sequence != max_sequence)
		goto out_unlock;

	data->critical_end = parent_ip;

	if (likely(!is_tracing_stopped())) {
		tracing_max_latency = delta;
		update_max_tr_single(tr, current, cpu);
	}

	max_sequence++;

out_unlock:
	spin_unlock_irqrestore(&max_trace_lock, flags);

out:
	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}
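
/*
 * report_latency() is deliberately checked twice above: once without the
 * lock as a cheap early-out, and again under max_trace_lock in case
 * another CPU recorded a larger maximum in the meantime.
 */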

static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	if (likely(!tracer_enabled))
		return;

	cpu = raw_smp_processor_id();

	if (per_cpu(tracing_cpu, cpu))
		return;

	data = tr->data[cpu];

	if (unlikely(!data) || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	data->critical_sequence = max_sequence;
	data->preempt_timestamp = ftrace_now(cpu);
	data->critical_start = parent_ip ? : ip;

	local_save_flags(flags);

	trace_function(tr, ip, parent_ip, flags, preempt_count());

	per_cpu(tracing_cpu, cpu) = 1;

	atomic_dec(&data->disabled);
}

static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
	int cpu;
	struct trace_array *tr = irqsoff_trace;
	struct trace_array_cpu *data;
	unsigned long flags;

	cpu = raw_smp_processor_id();
	/* Always clear the tracing cpu on stopping the trace */
	if (unlikely(per_cpu(tracing_cpu, cpu)))
		per_cpu(tracing_cpu, cpu) = 0;
	else
		return;

	if (!tracer_enabled)
		return;

	data = tr->data[cpu];

	if (unlikely(!data) ||
	    !data->critical_start || atomic_read(&data->disabled))
		return;

	atomic_inc(&data->disabled);

	local_save_flags(flags);
	trace_function(tr, ip, parent_ip, flags, preempt_count());
	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
	data->critical_start = 0;
	atomic_dec(&data->disabled);
}

/*
 * start and stop critical timings, used so that time spent in idle is
 * not reported as an irqs-off latency:
 */
void start_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
	if (preempt_trace() || irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
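
/*
 * Typical caller, sketched from the idle path (exact call sites vary by
 * architecture and kernel version):
 *
 *	local_irq_disable();
 *	stop_critical_timings();     <- don't count the idle wait
 *	... halt until an interrupt arrives ...
 *	start_critical_timings();
 *	local_irq_enable();
 */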

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
	if (!preempt_trace() && irq_trace())
		start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */
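
/*
 * Note the inversion above: interrupts going *off* starts a measurement
 * and interrupts coming back *on* stops it. With CONFIG_PROVE_LOCKING,
 * lockdep already hooks the irq-flags operations and forwards to
 * time_hardirqs_on/off(); otherwise this file supplies the
 * trace_hardirqs_*() hooks itself.
 */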

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
	if (preempt_trace())
		start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
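
/*
 * These hooks are invoked from the scheduler's preempt_count()
 * bookkeeping (add_preempt_count()/sub_preempt_count() in kernels of
 * this era), so every preempt-disabled region is bracketed the same way
 * irqs-off regions are.
 */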

static void start_irqsoff_tracer(struct trace_array *tr)
{
	register_ftrace_function(&trace_ops);
	if (tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
	tracer_enabled = 0;
	unregister_ftrace_function(&trace_ops);
}

static void __irqsoff_tracer_init(struct trace_array *tr)
{
	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
	trace_flags |= TRACE_ITER_LATENCY_FMT;

	tracing_max_latency = 0;
	irqsoff_trace = tr;
	/* make sure that the tracer is visible */
	smp_wmb();
	tracing_reset_online_cpus(tr);
	start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
	stop_irqsoff_tracer(tr);

	if (!save_lat_flag)
		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
	tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
	tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
	.name		= "irqsoff",
	.init		= irqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
	.name		= "preemptoff",
	.init		= preemptoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
	defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
	trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

	__irqsoff_tracer_init(tr);
	return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
	.name		= "preemptirqsoff",
	.init		= preemptirqsoff_tracer_init,
	.reset		= irqsoff_tracer_reset,
	.start		= irqsoff_tracer_start,
	.stop		= irqsoff_tracer_stop,
	.print_max	= 1,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
	register_irqsoff(irqsoff_tracer);
	register_preemptoff(preemptoff_tracer);
	register_preemptirqsoff(preemptirqsoff_tracer);

	return 0;
}
device_initcall(init_irqsoff_tracer);
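
/*
 * Usage sketch (assumes debugfs mounted at /sys/kernel/debug and the
 * relevant config options enabled; paths as of this kernel era):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_max_latency
 *	# echo irqsoff > /sys/kernel/debug/tracing/current_tracer
 *	# ... run a workload ...
 *	# cat /sys/kernel/debug/tracing/tracing_max_latency
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * "preemptoff" and "preemptirqsoff" are selected the same way.
 */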