Commit | Line | Data |
---|---|---|
35e8e302 SR |
1 | /* |
2 | * trace context switch | |
3 | * | |
4 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | */ | |
7 | #include <linux/module.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/debugfs.h> | |
10 | #include <linux/kallsyms.h> | |
11 | #include <linux/uaccess.h> | |
35e8e302 | 12 | #include <linux/ftrace.h> |
ad8d75ff | 13 | #include <trace/events/sched.h> |
35e8e302 SR |
14 | |
15 | #include "trace.h" | |
16 | ||
/* Trace array the sched probes record into; set via
 * tracing_sched_switch_assign_trace().
 */
static struct trace_array *ctx_trace;
/* Number of tracers that currently want switch/wakeup events recorded. */
static int __read_mostly tracer_enabled;
/* Users (tracers and cmdline recording) holding the probes registered. */
static int sched_ref;
/* Serializes probe (un)registration and updates of sched_ref/tracer_enabled. */
static DEFINE_MUTEX(sched_register_mutex);
/* NOTE(review): written elsewhere (not visible in this chunk); when non-zero
 * the probes skip event recording — presumably mirrors global tracing being
 * stopped. Confirm against tracing_start()/tracing_stop().
 */
static int sched_stopped;
35e8e302 | 22 | |
82e04af4 FW |
23 | |
24 | void | |
25 | tracing_sched_switch_trace(struct trace_array *tr, | |
26 | struct task_struct *prev, | |
27 | struct task_struct *next, | |
28 | unsigned long flags, int pc) | |
29 | { | |
30 | struct ftrace_event_call *call = &event_context_switch; | |
12883efb | 31 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
82e04af4 FW |
32 | struct ring_buffer_event *event; |
33 | struct ctx_switch_entry *entry; | |
34 | ||
e77405ad | 35 | event = trace_buffer_lock_reserve(buffer, TRACE_CTX, |
82e04af4 FW |
36 | sizeof(*entry), flags, pc); |
37 | if (!event) | |
38 | return; | |
39 | entry = ring_buffer_event_data(event); | |
40 | entry->prev_pid = prev->pid; | |
41 | entry->prev_prio = prev->prio; | |
42 | entry->prev_state = prev->state; | |
43 | entry->next_pid = next->pid; | |
44 | entry->next_prio = next->prio; | |
45 | entry->next_state = next->state; | |
46 | entry->next_cpu = task_cpu(next); | |
47 | ||
e77405ad SR |
48 | if (!filter_check_discard(call, entry, buffer, event)) |
49 | trace_buffer_unlock_commit(buffer, event, flags, pc); | |
82e04af4 FW |
50 | } |
51 | ||
e309b41d | 52 | static void |
38516ab5 | 53 | probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) |
35e8e302 | 54 | { |
35e8e302 SR |
55 | struct trace_array_cpu *data; |
56 | unsigned long flags; | |
35e8e302 | 57 | int cpu; |
38697053 | 58 | int pc; |
35e8e302 | 59 | |
dcef788e | 60 | if (unlikely(!sched_ref)) |
b07c3f19 MD |
61 | return; |
62 | ||
41bc8144 SR |
63 | tracing_record_cmdline(prev); |
64 | tracing_record_cmdline(next); | |
65 | ||
dcef788e | 66 | if (!tracer_enabled || sched_stopped) |
35e8e302 SR |
67 | return; |
68 | ||
38697053 | 69 | pc = preempt_count(); |
18cef379 | 70 | local_irq_save(flags); |
35e8e302 | 71 | cpu = raw_smp_processor_id(); |
12883efb | 72 | data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); |
35e8e302 | 73 | |
3ea2e6d7 | 74 | if (likely(!atomic_read(&data->disabled))) |
7be42151 | 75 | tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); |
35e8e302 | 76 | |
18cef379 | 77 | local_irq_restore(flags); |
35e8e302 SR |
78 | } |
79 | ||
82e04af4 FW |
80 | void |
81 | tracing_sched_wakeup_trace(struct trace_array *tr, | |
82 | struct task_struct *wakee, | |
83 | struct task_struct *curr, | |
84 | unsigned long flags, int pc) | |
85 | { | |
86 | struct ftrace_event_call *call = &event_wakeup; | |
87 | struct ring_buffer_event *event; | |
88 | struct ctx_switch_entry *entry; | |
12883efb | 89 | struct ring_buffer *buffer = tr->trace_buffer.buffer; |
82e04af4 | 90 | |
e77405ad | 91 | event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, |
82e04af4 FW |
92 | sizeof(*entry), flags, pc); |
93 | if (!event) | |
94 | return; | |
95 | entry = ring_buffer_event_data(event); | |
96 | entry->prev_pid = curr->pid; | |
97 | entry->prev_prio = curr->prio; | |
98 | entry->prev_state = curr->state; | |
99 | entry->next_pid = wakee->pid; | |
100 | entry->next_prio = wakee->prio; | |
101 | entry->next_state = wakee->state; | |
102 | entry->next_cpu = task_cpu(wakee); | |
103 | ||
e77405ad | 104 | if (!filter_check_discard(call, entry, buffer, event)) |
0d5c6e1c | 105 | trace_buffer_unlock_commit(buffer, event, flags, pc); |
82e04af4 FW |
106 | } |
107 | ||
4e655519 | 108 | static void |
38516ab5 | 109 | probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) |
57422797 | 110 | { |
57422797 IM |
111 | struct trace_array_cpu *data; |
112 | unsigned long flags; | |
38697053 | 113 | int cpu, pc; |
57422797 | 114 | |
dcef788e | 115 | if (unlikely(!sched_ref)) |
57422797 IM |
116 | return; |
117 | ||
b07c3f19 | 118 | tracing_record_cmdline(current); |
d9af56fb | 119 | |
dcef788e | 120 | if (!tracer_enabled || sched_stopped) |
8bcae09b Z |
121 | return; |
122 | ||
dcef788e | 123 | pc = preempt_count(); |
57422797 IM |
124 | local_irq_save(flags); |
125 | cpu = raw_smp_processor_id(); | |
12883efb | 126 | data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); |
57422797 | 127 | |
3ea2e6d7 | 128 | if (likely(!atomic_read(&data->disabled))) |
7be42151 | 129 | tracing_sched_wakeup_trace(ctx_trace, wakee, current, |
38697053 | 130 | flags, pc); |
57422797 | 131 | |
57422797 IM |
132 | local_irq_restore(flags); |
133 | } | |
134 | ||
5b82a1b0 MD |
135 | static int tracing_sched_register(void) |
136 | { | |
137 | int ret; | |
138 | ||
38516ab5 | 139 | ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL); |
5b82a1b0 | 140 | if (ret) { |
b07c3f19 | 141 | pr_info("wakeup trace: Couldn't activate tracepoint" |
5b82a1b0 MD |
142 | " probe to kernel_sched_wakeup\n"); |
143 | return ret; | |
144 | } | |
145 | ||
38516ab5 | 146 | ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL); |
5b82a1b0 | 147 | if (ret) { |
b07c3f19 | 148 | pr_info("wakeup trace: Couldn't activate tracepoint" |
5b82a1b0 MD |
149 | " probe to kernel_sched_wakeup_new\n"); |
150 | goto fail_deprobe; | |
151 | } | |
152 | ||
38516ab5 | 153 | ret = register_trace_sched_switch(probe_sched_switch, NULL); |
5b82a1b0 | 154 | if (ret) { |
b07c3f19 | 155 | pr_info("sched trace: Couldn't activate tracepoint" |
73d8b8bc | 156 | " probe to kernel_sched_switch\n"); |
5b82a1b0 MD |
157 | goto fail_deprobe_wake_new; |
158 | } | |
159 | ||
160 | return ret; | |
161 | fail_deprobe_wake_new: | |
38516ab5 | 162 | unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL); |
5b82a1b0 | 163 | fail_deprobe: |
38516ab5 | 164 | unregister_trace_sched_wakeup(probe_sched_wakeup, NULL); |
5b82a1b0 MD |
165 | return ret; |
166 | } | |
167 | ||
/*
 * Detach the scheduler tracepoint probes, in the reverse order of
 * tracing_sched_register().
 */
static void tracing_sched_unregister(void)
{
	unregister_trace_sched_switch(probe_sched_switch, NULL);
	unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
	unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
}
174 | ||
f2252935 | 175 | static void tracing_start_sched_switch(void) |
5b82a1b0 | 176 | { |
efade6e7 | 177 | mutex_lock(&sched_register_mutex); |
e168e051 | 178 | if (!(sched_ref++)) |
5b82a1b0 | 179 | tracing_sched_register(); |
efade6e7 | 180 | mutex_unlock(&sched_register_mutex); |
5b82a1b0 MD |
181 | } |
182 | ||
f2252935 | 183 | static void tracing_stop_sched_switch(void) |
5b82a1b0 | 184 | { |
efade6e7 | 185 | mutex_lock(&sched_register_mutex); |
e168e051 | 186 | if (!(--sched_ref)) |
5b82a1b0 | 187 | tracing_sched_unregister(); |
efade6e7 | 188 | mutex_unlock(&sched_register_mutex); |
5b82a1b0 MD |
189 | } |
190 | ||
41bc8144 SR |
/*
 * tracing_start_cmdline_record - start recording pid <-> comm mappings
 *
 * The sched probes call tracing_record_cmdline() even while no tracer
 * is enabled, so taking a probe reference is all that is needed.
 */
void tracing_start_cmdline_record(void)
{
	tracing_start_sched_switch();
}
195 | ||
/*
 * tracing_stop_cmdline_record - stop recording pid <-> comm mappings
 *
 * Drops the probe reference taken by tracing_start_cmdline_record().
 */
void tracing_stop_cmdline_record(void)
{
	tracing_stop_sched_switch();
}
200 | ||
75f5c47d | 201 | /** |
e168e051 SR |
202 | * tracing_start_sched_switch_record - start tracing context switches |
203 | * | |
204 | * Turns on context switch tracing for a tracer. | |
205 | */ | |
206 | void tracing_start_sched_switch_record(void) | |
207 | { | |
208 | if (unlikely(!ctx_trace)) { | |
209 | WARN_ON(1); | |
210 | return; | |
211 | } | |
212 | ||
213 | tracing_start_sched_switch(); | |
214 | ||
215 | mutex_lock(&sched_register_mutex); | |
216 | tracer_enabled++; | |
217 | mutex_unlock(&sched_register_mutex); | |
218 | } | |
219 | ||
220 | /** | |
221 | * tracing_stop_sched_switch_record - start tracing context switches | |
222 | * | |
223 | * Turns off context switch tracing for a tracer. | |
224 | */ | |
225 | void tracing_stop_sched_switch_record(void) | |
226 | { | |
227 | mutex_lock(&sched_register_mutex); | |
228 | tracer_enabled--; | |
229 | WARN_ON(tracer_enabled < 0); | |
230 | mutex_unlock(&sched_register_mutex); | |
231 | ||
232 | tracing_stop_sched_switch(); | |
233 | } | |
234 | ||
/**
 * tracing_sched_switch_assign_trace - assign a trace array for ctx switch
 * @tr: trace array pointer to assign
 *
 * Some tracers might want to record the context switches in their
 * trace. This function lets those tracers assign the trace array
 * to use.
 */
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
	/* NOTE(review): unlocked write; presumably callers assign before
	 * tracing_start_sched_switch_record() — confirm with call sites.
	 */
	ctx_trace = tr;
}
247 |