kernel/trace/trace_sched_switch.c
/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex);

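/*
 * Probe attached to the sched_switch tracepoint. Whenever the probes are
 * registered (sched_ref != 0) it records the cmdlines of prev and next;
 * if the tracer is also enabled it writes a context-switch entry into the
 * current CPU's trace buffer, unless that buffer has been disabled.
 */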
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                   struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu;
        int pc;

        if (!sched_ref)
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

        local_irq_restore(flags);
}

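/*
 * Probe attached to the sched_wakeup and sched_wakeup_new tracepoints.
 * When the tracer is enabled it records the current task's cmdline and
 * writes a wakeup entry (waker = current, wakee) into the current CPU's
 * trace buffer, unless that buffer has been disabled.
 */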
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        int cpu, pc;

        if (!likely(tracer_enabled))
                return;

        pc = preempt_count();
        tracing_record_cmdline(current);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];

        if (likely(!atomic_read(&data->disabled)))
                tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
                                           flags, pc);

        local_irq_restore(flags);
}

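/*
 * Hook the probes above into the scheduler tracepoints. On a partial
 * failure the probes registered so far are unwound so the tracepoints are
 * left clean; tracing_sched_unregister() tears all three down again.
 */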
static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup);
        return ret;
}

static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
        unregister_trace_sched_wakeup(probe_sched_wakeup);
}

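/*
 * The probes are registered on the first user and unregistered on the
 * last one: sched_ref counts the users and sched_register_mutex
 * serializes the transitions.
 */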
static void tracing_start_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(sched_ref++))
                tracing_sched_register();
        mutex_unlock(&sched_register_mutex);
}

static void tracing_stop_sched_switch(void)
{
        mutex_lock(&sched_register_mutex);
        if (!(--sched_ref))
                tracing_sched_unregister();
        mutex_unlock(&sched_register_mutex);
}

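/*
 * Cmdline recording only needs the probes to be registered, not the
 * tracer to be enabled, so these are thin wrappers around the
 * reference-counted start/stop helpers above.
 */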
void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}

/**
 * tracing_start_sched_switch_record - start tracing context switches
 *
 * Turns on context switch tracing for a tracer.
 */
void tracing_start_sched_switch_record(void)
{
        if (unlikely(!ctx_trace)) {
                WARN_ON(1);
                return;
        }

        tracing_start_sched_switch();

        mutex_lock(&sched_register_mutex);
        tracer_enabled++;
        mutex_unlock(&sched_register_mutex);
}

/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer.
 */
void tracing_stop_sched_switch_record(void)
{
        mutex_lock(&sched_register_mutex);
        tracer_enabled--;
        WARN_ON(tracer_enabled < 0);
        mutex_unlock(&sched_register_mutex);

        tracing_stop_sched_switch();
}

/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
        ctx_trace = tr;
}

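/*
 * A minimal usage sketch (not taken from any particular tracer): a tracer
 * that wants context-switch events interleaved into its own buffer would,
 * from its init callback, do
 *
 *        tracing_sched_switch_assign_trace(tr);
 *        tracing_start_sched_switch_record();
 *
 * and call tracing_stop_sched_switch_record() again from its reset
 * callback.
 */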
static void start_sched_trace(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
        tracing_start_sched_switch_record();
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracing_stop_sched_switch_record();
}

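/*
 * Callbacks wired into the ftrace core for the "sched_switch" tracer:
 * init/reset bracket a full recording session, while start/stop take and
 * drop an extra probe reference and reset the online-CPU buffers on
 * start.
 */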
static int sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;
        start_sched_trace(tr);
        return 0;
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (sched_ref)
                stop_sched_trace(tr);
}

static void sched_switch_trace_start(struct trace_array *tr)
{
        tracing_reset_online_cpus(tr);
        tracing_start_sched_switch();
}

static void sched_switch_trace_stop(struct trace_array *tr)
{
        tracing_stop_sched_switch();
}

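/*
 * The tracer registered with the ftrace core and exposed as
 * "sched_switch" in the tracing debugfs directory.
 */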
static struct tracer sched_switch_trace __read_mostly =
{
        .name           = "sched_switch",
        .init           = sched_switch_trace_init,
        .reset          = sched_switch_trace_reset,
        .start          = sched_switch_trace_start,
        .stop           = sched_switch_trace_stop,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);