/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/events/sched.h>

#include "trace.h"

static struct trace_array	*ctx_trace;
static int __read_mostly	tracer_enabled;
static int			sched_ref;
static DEFINE_MUTEX(sched_register_mutex);
static int			sched_stopped;
82e04af4 FW |
23 | |
24 | void | |
25 | tracing_sched_switch_trace(struct trace_array *tr, | |
26 | struct task_struct *prev, | |
27 | struct task_struct *next, | |
28 | unsigned long flags, int pc) | |
29 | { | |
30 | struct ftrace_event_call *call = &event_context_switch; | |
31 | struct ring_buffer_event *event; | |
32 | struct ctx_switch_entry *entry; | |
33 | ||
34 | event = trace_buffer_lock_reserve(tr, TRACE_CTX, | |
35 | sizeof(*entry), flags, pc); | |
36 | if (!event) | |
37 | return; | |
38 | entry = ring_buffer_event_data(event); | |
39 | entry->prev_pid = prev->pid; | |
40 | entry->prev_prio = prev->prio; | |
41 | entry->prev_state = prev->state; | |
42 | entry->next_pid = next->pid; | |
43 | entry->next_prio = next->prio; | |
44 | entry->next_state = next->state; | |
45 | entry->next_cpu = task_cpu(next); | |
46 | ||
47 | if (!filter_check_discard(call, entry, tr->buffer, event)) | |
48 | trace_buffer_unlock_commit(tr, event, flags, pc); | |
49 | } | |
50 | ||
e309b41d | 51 | static void |
b07c3f19 | 52 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, |
5b82a1b0 | 53 | struct task_struct *next) |
35e8e302 | 54 | { |
35e8e302 SR |
55 | struct trace_array_cpu *data; |
56 | unsigned long flags; | |
35e8e302 | 57 | int cpu; |
38697053 | 58 | int pc; |
35e8e302 | 59 | |
dcef788e | 60 | if (unlikely(!sched_ref)) |
b07c3f19 MD |
61 | return; |
62 | ||
41bc8144 SR |
63 | tracing_record_cmdline(prev); |
64 | tracing_record_cmdline(next); | |
65 | ||
dcef788e | 66 | if (!tracer_enabled || sched_stopped) |
35e8e302 SR |
67 | return; |
68 | ||
38697053 | 69 | pc = preempt_count(); |
18cef379 | 70 | local_irq_save(flags); |
35e8e302 | 71 | cpu = raw_smp_processor_id(); |
b07c3f19 | 72 | data = ctx_trace->data[cpu]; |
35e8e302 | 73 | |
3ea2e6d7 | 74 | if (likely(!atomic_read(&data->disabled))) |
7be42151 | 75 | tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); |
35e8e302 | 76 | |
18cef379 | 77 | local_irq_restore(flags); |
35e8e302 SR |
78 | } |
79 | ||
82e04af4 FW |
80 | void |
81 | tracing_sched_wakeup_trace(struct trace_array *tr, | |
82 | struct task_struct *wakee, | |
83 | struct task_struct *curr, | |
84 | unsigned long flags, int pc) | |
85 | { | |
86 | struct ftrace_event_call *call = &event_wakeup; | |
87 | struct ring_buffer_event *event; | |
88 | struct ctx_switch_entry *entry; | |
89 | ||
90 | event = trace_buffer_lock_reserve(tr, TRACE_WAKE, | |
91 | sizeof(*entry), flags, pc); | |
92 | if (!event) | |
93 | return; | |
94 | entry = ring_buffer_event_data(event); | |
95 | entry->prev_pid = curr->pid; | |
96 | entry->prev_prio = curr->prio; | |
97 | entry->prev_state = curr->state; | |
98 | entry->next_pid = wakee->pid; | |
99 | entry->next_prio = wakee->prio; | |
100 | entry->next_state = wakee->state; | |
101 | entry->next_cpu = task_cpu(wakee); | |
102 | ||
103 | if (!filter_check_discard(call, entry, tr->buffer, event)) | |
104 | ring_buffer_unlock_commit(tr->buffer, event); | |
105 | ftrace_trace_stack(tr, flags, 6, pc); | |
106 | ftrace_trace_userstack(tr, flags, pc); | |
107 | } | |
108 | ||
4e655519 | 109 | static void |
468a15bb | 110 | probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) |
57422797 | 111 | { |
57422797 IM |
112 | struct trace_array_cpu *data; |
113 | unsigned long flags; | |
38697053 | 114 | int cpu, pc; |
57422797 | 115 | |
dcef788e | 116 | if (unlikely(!sched_ref)) |
57422797 IM |
117 | return; |
118 | ||
b07c3f19 | 119 | tracing_record_cmdline(current); |
d9af56fb | 120 | |
dcef788e | 121 | if (!tracer_enabled || sched_stopped) |
8bcae09b Z |
122 | return; |
123 | ||
dcef788e | 124 | pc = preempt_count(); |
57422797 IM |
125 | local_irq_save(flags); |
126 | cpu = raw_smp_processor_id(); | |
b07c3f19 | 127 | data = ctx_trace->data[cpu]; |
57422797 | 128 | |
3ea2e6d7 | 129 | if (likely(!atomic_read(&data->disabled))) |
7be42151 | 130 | tracing_sched_wakeup_trace(ctx_trace, wakee, current, |
38697053 | 131 | flags, pc); |
57422797 | 132 | |
57422797 IM |
133 | local_irq_restore(flags); |
134 | } | |
135 | ||
5b82a1b0 MD |
136 | static int tracing_sched_register(void) |
137 | { | |
138 | int ret; | |
139 | ||
b07c3f19 | 140 | ret = register_trace_sched_wakeup(probe_sched_wakeup); |
5b82a1b0 | 141 | if (ret) { |
b07c3f19 | 142 | pr_info("wakeup trace: Couldn't activate tracepoint" |
5b82a1b0 MD |
143 | " probe to kernel_sched_wakeup\n"); |
144 | return ret; | |
145 | } | |
146 | ||
b07c3f19 | 147 | ret = register_trace_sched_wakeup_new(probe_sched_wakeup); |
5b82a1b0 | 148 | if (ret) { |
b07c3f19 | 149 | pr_info("wakeup trace: Couldn't activate tracepoint" |
5b82a1b0 MD |
150 | " probe to kernel_sched_wakeup_new\n"); |
151 | goto fail_deprobe; | |
152 | } | |
153 | ||
b07c3f19 | 154 | ret = register_trace_sched_switch(probe_sched_switch); |
5b82a1b0 | 155 | if (ret) { |
b07c3f19 | 156 | pr_info("sched trace: Couldn't activate tracepoint" |
73d8b8bc | 157 | " probe to kernel_sched_switch\n"); |
5b82a1b0 MD |
158 | goto fail_deprobe_wake_new; |
159 | } | |
160 | ||
161 | return ret; | |
162 | fail_deprobe_wake_new: | |
b07c3f19 | 163 | unregister_trace_sched_wakeup_new(probe_sched_wakeup); |
5b82a1b0 | 164 | fail_deprobe: |
b07c3f19 | 165 | unregister_trace_sched_wakeup(probe_sched_wakeup); |
5b82a1b0 MD |
166 | return ret; |
167 | } | |
168 | ||
/*
 * Detach all three scheduler tracepoint probes, in the reverse order
 * of their registration in tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup);
	unregister_trace_sched_wakeup(probe_sched_wakeup);
}
175 | ||
f2252935 | 176 | static void tracing_start_sched_switch(void) |
5b82a1b0 | 177 | { |
efade6e7 | 178 | mutex_lock(&sched_register_mutex); |
e168e051 | 179 | if (!(sched_ref++)) |
5b82a1b0 | 180 | tracing_sched_register(); |
efade6e7 | 181 | mutex_unlock(&sched_register_mutex); |
5b82a1b0 MD |
182 | } |
183 | ||
f2252935 | 184 | static void tracing_stop_sched_switch(void) |
5b82a1b0 | 185 | { |
efade6e7 | 186 | mutex_lock(&sched_register_mutex); |
e168e051 | 187 | if (!(--sched_ref)) |
5b82a1b0 | 188 | tracing_sched_unregister(); |
efade6e7 | 189 | mutex_unlock(&sched_register_mutex); |
5b82a1b0 MD |
190 | } |
191 | ||
41bc8144 SR |
/*
 * Start recording pid -> comm mappings by taking a reference on the
 * sched tracepoints (the probes record cmdlines unconditionally).
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
196 | ||
/*
 * Stop recording pid -> comm mappings; drops the tracepoint reference
 * taken by tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
201 | ||
75f5c47d | 202 | /** |
e168e051 SR |
203 | * tracing_start_sched_switch_record - start tracing context switches |
204 | * | |
205 | * Turns on context switch tracing for a tracer. | |
206 | */ | |
207 | void tracing_start_sched_switch_record(void) | |
208 | { | |
209 | if (unlikely(!ctx_trace)) { | |
210 | WARN_ON(1); | |
211 | return; | |
212 | } | |
213 | ||
214 | tracing_start_sched_switch(); | |
215 | ||
216 | mutex_lock(&sched_register_mutex); | |
217 | tracer_enabled++; | |
218 | mutex_unlock(&sched_register_mutex); | |
219 | } | |
220 | ||
221 | /** | |
222 | * tracing_stop_sched_switch_record - start tracing context switches | |
223 | * | |
224 | * Turns off context switch tracing for a tracer. | |
225 | */ | |
226 | void tracing_stop_sched_switch_record(void) | |
227 | { | |
228 | mutex_lock(&sched_register_mutex); | |
229 | tracer_enabled--; | |
230 | WARN_ON(tracer_enabled < 0); | |
231 | mutex_unlock(&sched_register_mutex); | |
232 | ||
233 | tracing_stop_sched_switch(); | |
234 | } | |
235 | ||
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
248 | ||
/* Tear down recording for this tracer; @tr is unused here. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}
253 | ||
/*
 * Tracer init callback: adopt @tr as the context-switch trace array,
 * clear any stale per-CPU buffers, and start recording.
 */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}
261 | ||
/*
 * Tracer reset callback: stop recording, but only if the tracepoint
 * probes are actually registered (sched_ref != 0).
 */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}
267 | ||
9036990d SR |
/* Tracer start callback: re-allow event recording in the probes. */
static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}
272 | ||
/* Tracer stop callback: suppress event recording in the probes. */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}
277 | ||
/* The "sched_switch" tracer, registered with the ftrace core below. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
290 | ||
291 | __init static int init_sched_switch_trace(void) | |
292 | { | |
293 | return register_tracer(&sched_switch_trace); | |
294 | } | |
295 | device_initcall(init_sched_switch_trace); | |
c71dd42d | 296 |