Commit | Line | Data |
---|---|---|
35e8e302 SR |
1 | /* |
2 | * trace context switch | |
3 | * | |
4 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | */ | |
7 | #include <linux/module.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/debugfs.h> | |
10 | #include <linux/kallsyms.h> | |
11 | #include <linux/uaccess.h> | |
35e8e302 | 12 | #include <linux/ftrace.h> |
ad8d75ff | 13 | #include <trace/events/sched.h> |
35e8e302 SR |
14 | |
15 | #include "trace.h" | |
16 | ||
17 | static struct trace_array *ctx_trace; | |
18 | static int __read_mostly tracer_enabled; | |
efade6e7 FW |
19 | static int sched_ref; |
20 | static DEFINE_MUTEX(sched_register_mutex); | |
5fec6ddc | 21 | static int sched_stopped; |
35e8e302 | 22 | |
82e04af4 FW |
23 | |
/*
 * tracing_sched_switch_trace - write a context-switch entry to the ring buffer
 * @tr:    trace array whose buffer receives the event
 * @prev:  task being switched out
 * @next:  task being switched in
 * @flags: irq flags captured by the caller
 * @pc:    preempt count captured by the caller
 *
 * Reserves a TRACE_CTX event in @tr's ring buffer, fills it with the
 * pid/prio/state of both tasks (plus the cpu @next is queued on), and
 * commits it unless the event filter discards it.
 */
void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* buffer full or recording disabled */
	entry = ring_buffer_event_data(event);
	entry->prev_pid		= prev->pid;
	entry->prev_prio	= prev->prio;
	entry->prev_state	= prev->state;
	entry->next_pid		= next->pid;
	entry->next_prio	= next->prio;
	entry->next_state	= next->state;
	entry->next_cpu		= task_cpu(next);

	/* Commit only if the event filter does not drop the entry. */
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);
}
51 | ||
/*
 * Tracepoint probe attached to sched_switch.
 *
 * Records the comms of both tasks whenever anyone holds a sched_ref
 * (so pid->comm resolution works even while the tracer itself is off),
 * then writes a context-switch entry if the tracer is enabled.
 */
static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu;
	int pc;

	if (unlikely(!sched_ref))
		return;

	/* cmdline recording runs even when tracer_enabled is 0 */
	tracing_record_cmdline(prev);
	tracing_record_cmdline(next);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	/* irqs off: keeps us on this cpu while touching its per-cpu data */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* skip if tracing is disabled on this cpu (e.g. recursion guard) */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);

	local_irq_restore(flags);
}
79 | ||
82e04af4 FW |
/*
 * tracing_sched_wakeup_trace - write a wakeup entry to the ring buffer
 * @tr:    trace array whose buffer receives the event
 * @wakee: task being woken (stored in the "next" fields)
 * @curr:  task doing the waking (stored in the "prev" fields)
 * @flags: irq flags captured by the caller
 * @pc:    preempt count captured by the caller
 *
 * Reserves and fills a TRACE_WAKE event. Unlike the context-switch
 * variant, this commits with ring_buffer_unlock_commit() and then
 * records the kernel and user stack traces explicitly.
 */
void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;	/* buffer full or recording disabled */
	entry = ring_buffer_event_data(event);
	entry->prev_pid		= curr->pid;
	entry->prev_prio	= curr->prio;
	entry->prev_state	= curr->state;
	entry->next_pid		= wakee->pid;
	entry->next_prio	= wakee->prio;
	entry->next_state	= wakee->state;
	entry->next_cpu		= task_cpu(wakee);

	if (!filter_check_discard(call, entry, buffer, event))
		ring_buffer_unlock_commit(buffer, event);
	/* skip 6 frames so the stack trace starts at the waker's caller */
	ftrace_trace_stack(tr->buffer, flags, 6, pc);
	ftrace_trace_userstack(tr->buffer, flags, pc);
}
109 | ||
/*
 * Tracepoint probe attached to sched_wakeup and sched_wakeup_new.
 *
 * Records current's comm whenever anyone holds a sched_ref, then writes
 * a wakeup entry (wakee woken by current) if the tracer is enabled.
 */
static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{
	struct trace_array_cpu *data;
	unsigned long flags;
	int cpu, pc;

	if (unlikely(!sched_ref))
		return;

	/* cmdline recording runs even when tracer_enabled is 0 */
	tracing_record_cmdline(current);

	if (!tracer_enabled || sched_stopped)
		return;

	pc = preempt_count();
	/* irqs off: keeps us on this cpu while touching its per-cpu data */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = ctx_trace->data[cpu];

	/* skip if tracing is disabled on this cpu (e.g. recursion guard) */
	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
					   flags, pc);

	local_irq_restore(flags);
}
136 | ||
5b82a1b0 MD |
/*
 * Attach the probes to the scheduler tracepoints: sched_wakeup,
 * sched_wakeup_new and sched_switch, in that order.
 *
 * Returns 0 on success or the tracepoint-registration error; on partial
 * failure, the probes registered so far are unwound via the goto chain.
 */
static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup\n");
		return ret;
	}

	ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	if (ret) {
		pr_info("wakeup trace: Couldn't activate tracepoint"
			" probe to kernel_sched_wakeup_new\n");
		goto fail_deprobe;
	}

	ret = register_trace_sched_switch(probe_sched_switch, NULL);
	if (ret) {
		pr_info("sched trace: Couldn't activate tracepoint"
			" probe to kernel_sched_switch\n");
		goto fail_deprobe_wake_new;
	}

	return ret;
fail_deprobe_wake_new:
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe:
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
	return ret;
}
169 | ||
/*
 * Detach all scheduler tracepoint probes, in reverse order of
 * registration (switch first, then the two wakeup probes).
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
176 | ||
f2252935 | 177 | static void tracing_start_sched_switch(void) |
5b82a1b0 | 178 | { |
efade6e7 | 179 | mutex_lock(&sched_register_mutex); |
e168e051 | 180 | if (!(sched_ref++)) |
5b82a1b0 | 181 | tracing_sched_register(); |
efade6e7 | 182 | mutex_unlock(&sched_register_mutex); |
5b82a1b0 MD |
183 | } |
184 | ||
f2252935 | 185 | static void tracing_stop_sched_switch(void) |
5b82a1b0 | 186 | { |
efade6e7 | 187 | mutex_lock(&sched_register_mutex); |
e168e051 | 188 | if (!(--sched_ref)) |
5b82a1b0 | 189 | tracing_sched_unregister(); |
efade6e7 | 190 | mutex_unlock(&sched_register_mutex); |
5b82a1b0 MD |
191 | } |
192 | ||
41bc8144 SR |
/*
 * tracing_start_cmdline_record - enable pid->comm recording
 *
 * Takes a sched_ref so the probes run and record task cmdlines,
 * without enabling the context-switch tracer itself.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
197 | ||
/*
 * tracing_stop_cmdline_record - disable pid->comm recording
 *
 * Drops the sched_ref taken by tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
202 | ||
75f5c47d | 203 | /** |
e168e051 SR |
204 | * tracing_start_sched_switch_record - start tracing context switches |
205 | * | |
206 | * Turns on context switch tracing for a tracer. | |
207 | */ | |
208 | void tracing_start_sched_switch_record(void) | |
209 | { | |
210 | if (unlikely(!ctx_trace)) { | |
211 | WARN_ON(1); | |
212 | return; | |
213 | } | |
214 | ||
215 | tracing_start_sched_switch(); | |
216 | ||
217 | mutex_lock(&sched_register_mutex); | |
218 | tracer_enabled++; | |
219 | mutex_unlock(&sched_register_mutex); | |
220 | } | |
221 | ||
/**
 * tracing_stop_sched_switch_record - stop tracing context switches
 *
 * Turns off context switch tracing for a tracer: decrements the
 * enabled count (warning on underflow) and drops the sched_ref.
 */
void tracing_stop_sched_switch_record(void)
{
	mutex_lock(&sched_register_mutex);
	tracer_enabled--;
	WARN_ON(tracer_enabled < 0);
	mutex_unlock(&sched_register_mutex);

	tracing_stop_sched_switch();
}
236 | ||
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	ctx_trace = tr;
}
249 | ||
/* Helper for the tracer reset path: stop context-switch recording. */
static void stop_sched_trace(struct trace_array *tr)
{
	tracing_stop_sched_switch_record();
}
254 | ||
/*
 * Tracer ->init callback: bind the trace array, clear stale per-cpu
 * data, and start recording. Always succeeds.
 */
static int sched_switch_trace_init(struct trace_array *tr)
{
	ctx_trace = tr;
	tracing_reset_online_cpus(tr);
	tracing_start_sched_switch_record();
	return 0;
}
262 | ||
/*
 * Tracer ->reset callback: stop recording, but only if the probes are
 * still referenced (sched_ref may already be zero).
 */
static void sched_switch_trace_reset(struct trace_array *tr)
{
	if (sched_ref)
		stop_sched_trace(tr);
}
268 | ||
9036990d SR |
/* Tracer ->start callback: clear the "stopped" flag the probes check. */
static void sched_switch_trace_start(struct trace_array *tr)
{
	sched_stopped = 0;
}
273 | ||
/* Tracer ->stop callback: set the "stopped" flag; probes bail early. */
static void sched_switch_trace_stop(struct trace_array *tr)
{
	sched_stopped = 1;
}
278 | ||
/* The "sched_switch" tracer: records context switches and wakeups. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.start		= sched_switch_trace_start,
	.stop		= sched_switch_trace_stop,
	.wait_pipe	= poll_wait_pipe,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
291 | ||
/* Register the sched_switch tracer with ftrace at device initcall time. */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);
c71dd42d | 297 |