/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;

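/*
 * Set (per CPU) while that CPU is inside a critical section being
 * timed, so the function-trace callback and nested section starts
 * can cheaply ignore CPUs that are not currently measuring.
 */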
static DEFINE_PER_CPU(int, tracing_cpu);

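/* Serializes the check-and-update of the maximum latency */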
static DEFINE_SPINLOCK(max_trace_lock);

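/*
 * trace_type is a mask of the bits below; each tracer flavour
 * (irqsoff, preemptoff, preemptirqsoff) sets it in its init
 * function to select which critical sections get timed.
 */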
enum {
        TRACER_IRQS_OFF = (1 << 1),
        TRACER_PREEMPT_OFF = (1 << 2),
};

static int trace_type __read_mostly;

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp unsigned long max_sequence;

#ifdef CONFIG_FTRACE
/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return;

        local_save_flags(flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(flags))
                return;

        data = tr->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                trace_function(tr, data, ip, parent_ip, flags);

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
};
#endif /* CONFIG_FTRACE */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

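/*
 * A critical section has just ended: measure its length and, if it
 * is a new maximum (or exceeds tracing_thresh), record it via
 * update_max_tr_single().
 */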
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        unsigned long latency, t0, t1;
        cycle_t T0, T1, delta;
        unsigned long flags;

        /*
         * usecs conversion is slow so we try to delay the conversion
         * as long as possible:
         */
        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        if (!report_latency(delta))
                goto out;

        spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);

        latency = nsecs_to_usecs(delta);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        tracing_max_latency = delta;
        t0 = nsecs_to_usecs(T0);
        t1 = nsecs_to_usecs(T1);

        data->critical_end = parent_ip;

        update_max_tr_single(tr, current, cpu);

        max_sequence++;

out_unlock:
        spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        tracing_reset(tr, cpu);
        trace_function(tr, data, CALLER_ADDR0, parent_ip, flags);
}

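/*
 * Mark the start of an irqs-off/preempt-off section on this CPU:
 * stamp the start time, reset the per-cpu buffer and set
 * tracing_cpu so irqsoff_tracer_call() starts recording.
 */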
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;
        tracing_reset(tr, cpu);

        local_save_flags(flags);

        trace_function(tr, data, ip, parent_ip, flags);

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

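/*
 * Mark the end of the section: clear tracing_cpu, record the final
 * entry and let check_critical_timing() decide whether this was a
 * new maximum.
 */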
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        trace_function(tr, data, ip, parent_ip, flags);
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings used to stop tracing (in idle) */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

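/*
 * With CONFIG_PROVE_LOCKING, lockdep supplies the hardirq on/off
 * events and calls time_hardirqs_on/off() below; without it we
 * hook trace_hardirqs_*() directly and provide stubs for the
 * lockdep-only entry points. Note the inversion: hardirqs going
 * *on* ends an irqs-off critical section, hardirqs going *off*
 * begins one.
 */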
#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void early_boot_irqs_off(void)
{
}

void early_boot_irqs_on(void)
{
}

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */
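/*
 * Hook/unhook the function-trace callback. tracer_enabled is set
 * only after the callback is registered, and cleared before it is
 * unregistered, so the timing paths do not run without a live
 * callback.
 */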
static void start_irqsoff_tracer(struct trace_array *tr)
{
        register_ftrace_function(&trace_ops);
        tracer_enabled = 1;
}

static void stop_irqsoff_tracer(struct trace_array *tr)
{
        tracer_enabled = 0;
        unregister_ftrace_function(&trace_ops);
}

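/*
 * Common init for all three flavours: publish the trace_array
 * before enabling, so irqsoff_tracer_call() never sees a stale
 * irqsoff_trace pointer.
 */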
static void __irqsoff_tracer_init(struct trace_array *tr)
{
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();

        if (tr->ctrl)
                start_irqsoff_tracer(tr);
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_ctrl_update(struct trace_array *tr)
{
        if (tr->ctrl)
                start_irqsoff_tracer(tr);
        else
                stop_irqsoff_tracer(tr);
}

static void irqsoff_tracer_open(struct trace_iterator *iter)
{
        /* stop the trace while dumping */
        if (iter->tr->ctrl)
                stop_irqsoff_tracer(iter->tr);
}

static void irqsoff_tracer_close(struct trace_iterator *iter)
{
        if (iter->tr->ctrl)
                start_irqsoff_tracer(iter->tr);
}

#ifdef CONFIG_IRQSOFF_TRACER
static void irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name = "irqsoff",
        .init = irqsoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .ctrl_update = irqsoff_tracer_ctrl_update,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_irqsoff,
#endif
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static void preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name = "preemptoff",
        .init = preemptoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .ctrl_update = irqsoff_tracer_ctrl_update,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_preemptoff,
#endif
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static void preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name = "preemptirqsoff",
        .init = preemptirqsoff_tracer_init,
        .reset = irqsoff_tracer_reset,
        .open = irqsoff_tracer_open,
        .close = irqsoff_tracer_close,
        .ctrl_update = irqsoff_tracer_ctrl_update,
        .print_max = 1,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_preemptirqsoff,
#endif
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif
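/*
 * Register whichever of the three flavours are configured in; the
 * register_* macros above collapse to no-ops for tracers that are
 * configured out.
 */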
__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);