Commit | Line | Data |
---|---|---|
35e8e302 SR |
1 | /* |
2 | * trace context switch | |
3 | * | |
4 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> | |
5 | * | |
6 | */ | |
7 | #include <linux/module.h> | |
8 | #include <linux/fs.h> | |
9 | #include <linux/debugfs.h> | |
10 | #include <linux/kallsyms.h> | |
11 | #include <linux/uaccess.h> | |
12 | #include <linux/marker.h> | |
13 | #include <linux/ftrace.h> | |
14 | ||
15 | #include "trace.h" | |
16 | ||
/* Trace array the sched-switch probes record into; set at init time. */
static struct trace_array *ctx_trace;
/* Non-zero while the sched_switch tracer is actively recording. */
static int __read_mostly tracer_enabled;
/*
 * Probe body for a context switch: record the prev->next transition
 * into the current CPU's trace buffer.
 *
 * @__rq: opaque runqueue pointer, forwarded to ftrace_all_fair_tasks()
 * @prev: task being switched out
 * @next: task being switched in
 *
 * Runs with interrupts disabled for the duration of the record so the
 * per-cpu data cannot be re-entered from irq context on this CPU.
 */
static void
ctx_switch_func(void *__rq, struct task_struct *prev, struct task_struct *next)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/* Recursion guard: only trace when we are the sole user on this CPU. */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		tracing_sched_switch_trace(tr, data, prev, next, flags);
		/* Optionally dump the whole fair-sched tree as well. */
		if (trace_flags & TRACE_ITER_SCHED_TREE)
			ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
46 | ||
/*
 * Probe body for a task wakeup: record who woke whom into the current
 * CPU's trace buffer.
 *
 * @__rq:  opaque runqueue pointer, forwarded to ftrace_all_fair_tasks()
 * @wakee: task being woken up
 * @curr:  task performing the wakeup
 *
 * Mirrors ctx_switch_func(): irqs off, per-cpu recursion guard via the
 * atomic 'disabled' counter, then the actual trace call.
 */
static void
wakeup_func(void *__rq, struct task_struct *wakee, struct task_struct *curr)
{
	struct trace_array *tr = ctx_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;

	if (!tracer_enabled)
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	/* Recursion guard: only trace when we are the sole user on this CPU. */
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		tracing_sched_wakeup_trace(tr, data, wakee, curr, flags);
		/* Optionally dump the whole fair-sched tree as well. */
		if (trace_flags & TRACE_ITER_SCHED_TREE)
			ftrace_all_fair_tasks(__rq, tr, data);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}
73 | ||
/*
 * Hook called by the scheduler on every context switch.  Records the
 * outgoing task's comm, emits the switch event, then chains to the
 * wakeup-latency tracer.
 */
void
ftrace_ctx_switch(void *__rq, struct task_struct *prev,
		  struct task_struct *next)
{
	tracing_record_cmdline(prev);

	/*
	 * If tracer_switch_func only points to the local
	 * switch func, it still needs the ptr passed to it.
	 */
	ctx_switch_func(__rq, prev, next);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_switch(prev, next);
}
91 | ||
/*
 * Hook called by the scheduler when a task is woken.  Records the
 * waker's comm, emits the wakeup event, then chains to the
 * wakeup-latency tracer.
 */
void
ftrace_wake_up_task(void *__rq, struct task_struct *wakee,
		    struct task_struct *curr)
{
	tracing_record_cmdline(curr);

	wakeup_func(__rq, wakee, curr);

	/*
	 * Chain to the wakeup tracer (this is a NOP if disabled):
	 */
	wakeup_sched_wakeup(wakee, curr);
}
105 | ||
/*
 * Restart the trace: stamp a new start time and clear the per-cpu
 * buffers of every online CPU.
 */
static void sched_switch_reset(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		tracing_reset(tr->data[cpu]);
}
115 | ||
e309b41d | 116 | static void start_sched_trace(struct trace_array *tr) |
35e8e302 SR |
117 | { |
118 | sched_switch_reset(tr); | |
119 | tracer_enabled = 1; | |
120 | } | |
121 | ||
e309b41d | 122 | static void stop_sched_trace(struct trace_array *tr) |
35e8e302 SR |
123 | { |
124 | tracer_enabled = 0; | |
125 | } | |
126 | ||
e309b41d | 127 | static void sched_switch_trace_init(struct trace_array *tr) |
35e8e302 SR |
128 | { |
129 | ctx_trace = tr; | |
130 | ||
131 | if (tr->ctrl) | |
132 | start_sched_trace(tr); | |
133 | } | |
134 | ||
e309b41d | 135 | static void sched_switch_trace_reset(struct trace_array *tr) |
35e8e302 SR |
136 | { |
137 | if (tr->ctrl) | |
138 | stop_sched_trace(tr); | |
139 | } | |
140 | ||
141 | static void sched_switch_trace_ctrl_update(struct trace_array *tr) | |
142 | { | |
143 | /* When starting a new trace, reset the buffers */ | |
144 | if (tr->ctrl) | |
145 | start_sched_trace(tr); | |
146 | else | |
147 | stop_sched_trace(tr); | |
148 | } | |
149 | ||
/* Tracer plugin descriptor registered with the ftrace core. */
static struct tracer sched_switch_trace __read_mostly =
{
	.name		= "sched_switch",
	.init		= sched_switch_trace_init,
	.reset		= sched_switch_trace_reset,
	.ctrl_update	= sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_sched_switch,
#endif
};
160 | ||
/* Register the sched_switch tracer with the ftrace core at boot. */
__init static int init_sched_switch_trace(void)
{
	return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);