/*
 * trace irqs off critical timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * From code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/kallsyms.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

static struct trace_array       *irqsoff_trace __read_mostly;
static int                      tracer_enabled __read_mostly;

static DEFINE_PER_CPU(int, tracing_cpu);

static DEFINE_SPINLOCK(max_trace_lock);

enum {
        TRACER_IRQS_OFF         = (1 << 1),
        TRACER_PREEMPT_OFF      = (1 << 2),
};

static int trace_type __read_mostly;

static int save_lat_flag;

static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph);

#ifdef CONFIG_PREEMPT_TRACER
static inline int
preempt_trace(void)
{
        return ((trace_type & TRACER_PREEMPT_OFF) && preempt_count());
}
#else
# define preempt_trace() (0)
#endif

#ifdef CONFIG_IRQSOFF_TRACER
static inline int
irq_trace(void)
{
        return ((trace_type & TRACER_IRQS_OFF) &&
                irqs_disabled());
}
#else
# define irq_trace() (0)
#endif

#define TRACE_DISPLAY_GRAPH     1

static struct tracer_opt trace_opts[] = {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* display latency trace as call graph */
        { TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
#endif
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        .val  = 0,
        .opts = trace_opts,
};

#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)

/*
 * Sequence count - we record it when starting a measurement and
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
 * and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
static __cacheline_aligned_in_smp       unsigned long max_sequence;

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Prologue for the preempt and irqs off function tracers.
 *
 * Returns 1 if it is OK to continue, and data->disabled is
 *            incremented.
 *         0 if the trace is to be ignored, and data->disabled
 *            is kept the same.
 *
 * Note, this function is also used outside this ifdef but
 *  inside the #ifdef of the function graph tracer below.
 *  This is OK, since the function graph tracer is
 *  dependent on the function tracer.
 */
static int func_prolog_dec(struct trace_array *tr,
                           struct trace_array_cpu **data,
                           unsigned long *flags)
{
        long disabled;
        int cpu;

        /*
         * Does not matter if we preempt. We test the flags
         * afterward, to see if irqs are disabled or not.
         * If we preempt and get a false positive, the flags
         * test will fail.
         */
        cpu = raw_smp_processor_id();
        if (likely(!per_cpu(tracing_cpu, cpu)))
                return 0;

        local_save_flags(*flags);
        /* slight chance to get a false positive on tracing_cpu */
        if (!irqs_disabled_flags(*flags))
                return 0;

        *data = tr->data[cpu];
        disabled = atomic_inc_return(&(*data)->disabled);

        if (likely(disabled == 1))
                return 1;

        atomic_dec(&(*data)->disabled);

        return 0;
}

/*
 * irqsoff uses its own tracer function to keep the overhead down:
 */
static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        trace_function(tr, ip, parent_ip, flags, preempt_count());

        atomic_dec(&data->disabled);
}

static struct ftrace_ops trace_ops __read_mostly =
{
        .func = irqsoff_tracer_call,
        .flags = FTRACE_OPS_FL_GLOBAL,
};
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
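/*
 * Handle the "display-graph" tracer option. Switching between function
 * and function-graph output requires a restart: stop the tracer in its
 * old mode, clear the per-cpu tracing state and the saved max latency,
 * then start it again in the requested mode.
 */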
static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
        int cpu;

        if (!(bit & TRACE_DISPLAY_GRAPH))
                return -EINVAL;

        if (!(is_graph() ^ set))
                return 0;

        stop_irqsoff_tracer(irqsoff_trace, !set);

        for_each_possible_cpu(cpu)
                per_cpu(tracing_cpu, cpu) = 0;

        tracing_max_latency = 0;
        tracing_reset_online_cpus(irqsoff_trace);

        return start_irqsoff_tracer(irqsoff_trace, set);
}

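/*
 * Entry/return callbacks for the function graph tracer. Both rely on
 * func_prolog_dec() to record only on the CPU that currently has
 * irqs/preemption disabled and to guard against recursion via
 * data->disabled.
 */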
static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int ret;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return 0;

        pc = preempt_count();
        ret = __trace_graph_entry(tr, trace, flags, pc);
        atomic_dec(&data->disabled);

        return ret;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;
        int pc;

        if (!func_prolog_dec(tr, &data, &flags))
                return;

        pc = preempt_count();
        __trace_graph_return(tr, trace, flags, pc);
        atomic_dec(&data->disabled);
}

static void irqsoff_trace_open(struct trace_iterator *iter)
{
        if (is_graph())
                graph_trace_open(iter);

}

static void irqsoff_trace_close(struct trace_iterator *iter)
{
        if (iter->private)
                graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_CPU | \
                            TRACE_GRAPH_PRINT_PROC | \
                            TRACE_GRAPH_PRINT_ABS_TIME | \
                            TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        /*
         * In graph mode call the graph tracer output function,
         * otherwise go with the TRACE_FN event handler
         */
        if (is_graph())
                return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_print_header(struct seq_file *s)
{
        if (is_graph())
                print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
        else
                trace_default_header(s);
}

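/*
 * Write a function trace entry, honouring the display-graph option:
 * record a graph event when graph output is selected, otherwise a
 * plain TRACE_FN event.
 */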
static void
__trace_function(struct trace_array *tr,
                 unsigned long ip, unsigned long parent_ip,
                 unsigned long flags, int pc)
{
        if (is_graph())
                trace_graph_function(tr, ip, parent_ip, flags, pc);
        else
                trace_function(tr, ip, parent_ip, flags, pc);
}

#else
#define __trace_function trace_function

static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
{
        return -EINVAL;
}

static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
{
        return -1;
}

static enum print_line_t irqsoff_print_line(struct trace_iterator *iter)
{
        return TRACE_TYPE_UNHANDLED;
}

static void irqsoff_graph_return(struct ftrace_graph_ret *trace) { }
static void irqsoff_print_header(struct seq_file *s) { }
static void irqsoff_trace_open(struct trace_iterator *iter) { }
static void irqsoff_trace_close(struct trace_iterator *iter) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static int report_latency(cycle_t delta)
{
        if (tracing_thresh) {
                if (delta < tracing_thresh)
                        return 0;
        } else {
                if (delta <= tracing_max_latency)
                        return 0;
        }
        return 1;
}

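/*
 * Called when a critical section ends: compute how long irqs (or
 * preemption) were disabled and, if this beats the current maximum
 * (or exceeds the configured threshold), save the snapshot via
 * update_max_tr_single() under max_trace_lock.
 */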
static void
check_critical_timing(struct trace_array *tr,
                      struct trace_array_cpu *data,
                      unsigned long parent_ip,
                      int cpu)
{
        cycle_t T0, T1, delta;
        unsigned long flags;
        int pc;

        T0 = data->preempt_timestamp;
        T1 = ftrace_now(cpu);
        delta = T1-T0;

        local_save_flags(flags);

        pc = preempt_count();

        if (!report_latency(delta))
                goto out;

        spin_lock_irqsave(&max_trace_lock, flags);

        /* check if we are still the max latency */
        if (!report_latency(delta))
                goto out_unlock;

        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
        /* Skip 5 functions to get to the irq/preempt enable function */
        __trace_stack(tr, flags, 5, pc);

        if (data->critical_sequence != max_sequence)
                goto out_unlock;

        data->critical_end = parent_ip;

        if (likely(!is_tracing_stopped())) {
                tracing_max_latency = delta;
                update_max_tr_single(tr, current, cpu);
        }

        max_sequence++;

out_unlock:
        spin_unlock_irqrestore(&max_trace_lock, flags);

out:
        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        __trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc);
}

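/*
 * Mark the start of an irqs/preempt-off critical section on this CPU:
 * timestamp it and remember where it began.
 */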
static inline void
start_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        if (likely(!tracer_enabled))
                return;

        cpu = raw_smp_processor_id();

        if (per_cpu(tracing_cpu, cpu))
                return;

        data = tr->data[cpu];

        if (unlikely(!data) || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        data->critical_sequence = max_sequence;
        data->preempt_timestamp = ftrace_now(cpu);
        data->critical_start = parent_ip ? : ip;

        local_save_flags(flags);

        __trace_function(tr, ip, parent_ip, flags, preempt_count());

        per_cpu(tracing_cpu, cpu) = 1;

        atomic_dec(&data->disabled);
}

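/*
 * Mark the end of the critical section on this CPU and let
 * check_critical_timing() decide whether it is a new maximum.
 */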
static inline void
stop_critical_timing(unsigned long ip, unsigned long parent_ip)
{
        int cpu;
        struct trace_array *tr = irqsoff_trace;
        struct trace_array_cpu *data;
        unsigned long flags;

        cpu = raw_smp_processor_id();
        /* Always clear the tracing cpu on stopping the trace */
        if (unlikely(per_cpu(tracing_cpu, cpu)))
                per_cpu(tracing_cpu, cpu) = 0;
        else
                return;

        if (!tracer_enabled)
                return;

        data = tr->data[cpu];

        if (unlikely(!data) ||
            !data->critical_start || atomic_read(&data->disabled))
                return;

        atomic_inc(&data->disabled);

        local_save_flags(flags);
        __trace_function(tr, ip, parent_ip, flags, preempt_count());
        check_critical_timing(tr, data, parent_ip ? : ip, cpu);
        data->critical_start = 0;
        atomic_dec(&data->disabled);
}

/* start and stop critical timings, used to suppress measurement (e.g. in idle) */
void start_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);

void stop_critical_timings(void)
{
        if (preempt_trace() || irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);

#ifdef CONFIG_IRQSOFF_TRACER
#ifdef CONFIG_PROVE_LOCKING
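/*
 * With lockdep (PROVE_LOCKING) enabled, lockdep provides the
 * trace_hardirqs_*() entry points and calls back into these timing
 * hooks instead.
 */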
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(a0, a1);
}

void time_hardirqs_off(unsigned long a0, unsigned long a1)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(a0, a1);
}

#else /* !CONFIG_PROVE_LOCKING */

/*
 * Stubs:
 */

void trace_softirqs_on(unsigned long ip)
{
}

void trace_softirqs_off(unsigned long ip)
{
}

inline void print_irqtrace_events(struct task_struct *curr)
{
}

/*
 * We are only interested in hardirq on/off events:
 */
void trace_hardirqs_on(void)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);

void trace_hardirqs_off(void)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);

void trace_hardirqs_on_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_off_caller(unsigned long caller_addr)
{
        if (!preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

#endif /* CONFIG_PROVE_LOCKING */
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
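/*
 * Preemption on/off hooks used by the preemptoff tracer.
 */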
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                stop_critical_timing(a0, a1);
}

void trace_preempt_off(unsigned long a0, unsigned long a1)
{
        if (preempt_trace())
                start_critical_timing(a0, a1);
}
#endif /* CONFIG_PREEMPT_TRACER */

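/*
 * Register/unregister the ftrace callbacks: either the plain function
 * tracer hook or the graph entry/return pair, depending on "graph".
 */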
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
        int ret = 0;

        if (!graph)
                ret = register_ftrace_function(&trace_ops);
        else
                ret = register_ftrace_graph(&irqsoff_graph_return,
                                            &irqsoff_graph_entry);

        if (!ret && tracing_is_enabled())
                tracer_enabled = 1;
        else
                tracer_enabled = 0;

        return ret;
}

static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{
        tracer_enabled = 0;

        if (!graph)
                unregister_ftrace_function(&trace_ops);
        else
                unregister_ftrace_graph();
}

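/*
 * Common setup shared by the irqsoff, preemptoff and preemptirqsoff
 * tracers: force the latency output format, reset the buffers and
 * start tracing in the currently selected mode.
 */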
static void __irqsoff_tracer_init(struct trace_array *tr)
{
        save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
        trace_flags |= TRACE_ITER_LATENCY_FMT;

        tracing_max_latency = 0;
        irqsoff_trace = tr;
        /* make sure that the tracer is visible */
        smp_wmb();
        tracing_reset_online_cpus(tr);

        if (start_irqsoff_tracer(tr, is_graph()))
                printk(KERN_ERR "failed to start irqsoff tracer\n");
}

static void irqsoff_tracer_reset(struct trace_array *tr)
{
        stop_irqsoff_tracer(tr, is_graph());

        if (!save_lat_flag)
                trace_flags &= ~TRACE_ITER_LATENCY_FMT;
}

static void irqsoff_tracer_start(struct trace_array *tr)
{
        tracer_enabled = 1;
}

static void irqsoff_tracer_stop(struct trace_array *tr)
{
        tracer_enabled = 0;
}

#ifdef CONFIG_IRQSOFF_TRACER
static int irqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}
static struct tracer irqsoff_tracer __read_mostly =
{
        .name           = "irqsoff",
        .init           = irqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_irqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
# define register_irqsoff(trace) do { } while (0)
#endif

#ifdef CONFIG_PREEMPT_TRACER
static int preemptoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptoff_tracer __read_mostly =
{
        .name           = "preemptoff",
        .init           = preemptoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
# define register_preemptoff(trace) do { } while (0)
#endif

#if defined(CONFIG_IRQSOFF_TRACER) && \
        defined(CONFIG_PREEMPT_TRACER)

static int preemptirqsoff_tracer_init(struct trace_array *tr)
{
        trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;

        __irqsoff_tracer_init(tr);
        return 0;
}

static struct tracer preemptirqsoff_tracer __read_mostly =
{
        .name           = "preemptirqsoff",
        .init           = preemptirqsoff_tracer_init,
        .reset          = irqsoff_tracer_reset,
        .start          = irqsoff_tracer_start,
        .stop           = irqsoff_tracer_stop,
        .print_max      = 1,
        .print_header   = irqsoff_print_header,
        .print_line     = irqsoff_print_line,
        .flags          = &tracer_flags,
        .set_flag       = irqsoff_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_preemptirqsoff,
#endif
        .open           = irqsoff_trace_open,
        .close          = irqsoff_trace_close,
        .use_max_tr     = 1,
};

# define register_preemptirqsoff(trace) register_tracer(&trace)
#else
# define register_preemptirqsoff(trace) do { } while (0)
#endif

__init static int init_irqsoff_tracer(void)
{
        register_irqsoff(irqsoff_tracer);
        register_preemptoff(preemptoff_tracer);
        register_preemptirqsoff(preemptirqsoff_tracer);

        return 0;
}
device_initcall(init_irqsoff_tracer);