Merge branches 'tracing/blktrace', 'tracing/ftrace', 'tracing/function-graph-tracer...
[deliverable/linux.git] / kernel / trace / trace_branch.c
CommitLineData
1f0d69a9
SR
1/*
2 * unlikely profiler
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/kallsyms.h>
7#include <linux/seq_file.h>
8#include <linux/spinlock.h>
9#include <linux/debugfs.h>
10#include <linux/uaccess.h>
11#include <linux/module.h>
12#include <linux/ftrace.h>
13#include <linux/hash.h>
14#include <linux/fs.h>
15#include <asm/local.h>
16#include "trace.h"
17
#ifdef CONFIG_BRANCH_TRACER

/* Non-zero while the "branch" tracer is recording annotated branches. */
static int branch_tracing_enabled __read_mostly;
/* Serializes enable_branch_tracing()/disable_branch_tracing(). */
static DEFINE_MUTEX(branch_tracing_mutex);
/* Trace array that branch events are written to; NULL until enabled. */
static struct trace_array *branch_tracer;
23
/*
 * Record one annotated likely()/unlikely() evaluation into the branch
 * tracer's ring buffer: function, file (basename only), line, and
 * whether the annotation predicted correctly.
 */
static void
probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
	struct trace_array *tr = branch_tracer;
	struct ring_buffer_event *event;
	struct trace_branch *entry;
	unsigned long flags, irq_flags;
	int cpu, pc;
	const char *p;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen
	 * if the module is unloaded, and then we go and read the
	 * pointer. This is slower, but much safer.
	 */

	if (unlikely(!tr))
		return;

	/* Block irqs so we cannot recurse via an interrupt on this CPU. */
	raw_local_irq_save(flags);
	cpu = raw_smp_processor_id();
	/* Per-cpu recursion/disable guard: bail if we are already inside. */
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;

	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		goto out;

	pc = preempt_count();
	entry	= ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type		= TRACE_BRANCH;

	/* Strip off the path, only save the file */
	p = f->file + strlen(f->file);
	while (p >= f->file && *p != '/')
		p--;
	p++;

	strncpy(entry->func, f->func, TRACE_FUNC_SIZE);
	strncpy(entry->file, p, TRACE_FILE_SIZE);
	/* strncpy() does not guarantee termination; force it. */
	entry->func[TRACE_FUNC_SIZE] = 0;
	entry->file[TRACE_FILE_SIZE] = 0;
	entry->line = f->line;
	entry->correct = val == expect;

	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);

 out:
	atomic_dec(&tr->data[cpu]->disabled);
	raw_local_irq_restore(flags);
}
78
79static inline
9f029e83 80void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
52f232cb 81{
9f029e83 82 if (!branch_tracing_enabled)
52f232cb
SR
83 return;
84
85 probe_likely_condition(f, val, expect);
86}
87
9f029e83 88int enable_branch_tracing(struct trace_array *tr)
52f232cb
SR
89{
90 int ret = 0;
91
9f029e83
SR
92 mutex_lock(&branch_tracing_mutex);
93 branch_tracer = tr;
52f232cb
SR
94 /*
95 * Must be seen before enabling. The reader is a condition
96 * where we do not need a matching rmb()
97 */
98 smp_wmb();
9f029e83
SR
99 branch_tracing_enabled++;
100 mutex_unlock(&branch_tracing_mutex);
52f232cb
SR
101
102 return ret;
103}
104
9f029e83 105void disable_branch_tracing(void)
52f232cb 106{
9f029e83 107 mutex_lock(&branch_tracing_mutex);
52f232cb 108
9f029e83 109 if (!branch_tracing_enabled)
52f232cb
SR
110 goto out_unlock;
111
9f029e83 112 branch_tracing_enabled--;
52f232cb
SR
113
114 out_unlock:
9f029e83 115 mutex_unlock(&branch_tracing_mutex);
52f232cb 116}
80e5ea45
SR
117
/* Tracer hook: begin branch tracing into @tr. */
static void start_branch_trace(struct trace_array *tr)
{
	enable_branch_tracing(tr);
}
122
/* Tracer hook: stop branch tracing; @tr is unused by the disable path. */
static void stop_branch_trace(struct trace_array *tr)
{
	disable_branch_tracing();
}
127
1c80025a 128static int branch_trace_init(struct trace_array *tr)
80e5ea45
SR
129{
130 int cpu;
131
132 for_each_online_cpu(cpu)
133 tracing_reset(tr, cpu);
134
135 start_branch_trace(tr);
1c80025a 136 return 0;
80e5ea45
SR
137}
138
/* Tracer ->reset callback: stop recording into @tr. */
static void branch_trace_reset(struct trace_array *tr)
{
	stop_branch_trace(tr);
}
143
/*
 * The "branch" tracer: while enabled, every annotated likely()/unlikely()
 * evaluation is logged to the ring buffer.  Registered at boot by
 * init_branch_trace() below.
 */
struct tracer branch_trace __read_mostly =
{
	.name		= "branch",
	.init		= branch_trace_init,
	.reset		= branch_trace_reset,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_branch,
#endif
};
153
/* Register the branch tracer with the tracing core at boot. */
__init static int init_branch_trace(void)
{
	return register_tracer(&branch_trace);
}

device_initcall(init_branch_trace);
52f232cb
SR
#else
/* Branch tracer configured out: recording becomes a no-op. */
static inline
void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect)
{
}
#endif /* CONFIG_BRANCH_TRACER */
52f232cb 166
9f029e83 167void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect)
1f0d69a9 168{
52f232cb
SR
169 /*
170 * I would love to have a trace point here instead, but the
171 * trace point code is so inundated with unlikely and likely
172 * conditions that the recursive nightmare that exists is too
173 * much to try to get working. At least for now.
174 */
175 trace_likely_condition(f, val, expect);
176
1f0d69a9
SR
177 /* FIXME: Make this atomic! */
178 if (val == expect)
179 f->correct++;
180 else
181 f->incorrect++;
182}
183EXPORT_SYMBOL(ftrace_likely_update);
184
/*
 * Bounds of one branch-profiling record section for the seq_file
 * iterators below.  [start, stop) delimits the ftrace_branch_data
 * records; @hit selects the "miss/hit" column headings (all-branch
 * profile) instead of "correct/incorrect" (annotated profile).
 */
struct ftrace_pointer {
	void *start;
	void *stop;
	int hit;
};
190
/*
 * seq_file ->next: advance to the next ftrace_branch_data record.
 * The (void *)1 cookie (handed out first by t_start()) stands for the
 * header line; the record after it is the start of the section.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	const struct ftrace_pointer *f = m->private;
	struct ftrace_branch_data *p = v;

	(*pos)++;

	if (v == (void *)1)
		return f->start;

	++p;

	/* Past the end of the section: iteration is done. */
	if ((void *)p >= (void *)f->stop)
		return NULL;

	return p;
}
209
210static void *t_start(struct seq_file *m, loff_t *pos)
211{
212 void *t = (void *)1;
213 loff_t l = 0;
214
215 for (; t && l < *pos; t = t_next(m, t, &l))
216 ;
217
218 return t;
219}
220
/* seq_file ->stop: nothing to release; records live in static sections. */
static void t_stop(struct seq_file *m, void *p)
{
}
224
/*
 * seq_file ->show: print the column header for the (void *)1 cookie,
 * otherwise one formatted line per branch-profiling record.
 * NOTE(review): exact header spacing reconstructed — confirm column
 * alignment against the record format string below.
 */
static int t_show(struct seq_file *m, void *v)
{
	const struct ftrace_pointer *fp = m->private;
	struct ftrace_branch_data *p = v;
	const char *f;
	long percent;

	if (v == (void *)1) {
		if (fp->hit)
			seq_printf(m, "   miss      hit    %% ");
		else
			seq_printf(m, " correct incorrect  %% ");
		seq_printf(m, "       Function                "
			      "  File              Line\n"
			      " ------- ---------  - "
			      "       --------                "
			      "  ----              ----\n");
		return 0;
	}

	/* Only print the file, not the path */
	f = p->file + strlen(p->file);
	while (f >= p->file && *f != '/')
		f--;
	f++;

	/*
	 * The miss is overlayed on correct, and hit on incorrect.
	 */
	if (p->correct) {
		percent = p->incorrect * 100;
		percent /= p->correct + p->incorrect;
	} else
		/* No "correct" hits: 100% wrong if taken at all, else unknown. */
		percent = p->incorrect ? 100 : -1;

	seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
	if (percent < 0)
		seq_printf(m, "  X ");
	else
		seq_printf(m, "%3ld ", percent);
	seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
	return 0;
}
268
269static struct seq_operations tracing_likely_seq_ops = {
270 .start = t_start,
271 .next = t_next,
272 .stop = t_stop,
273 .show = t_show,
274};
275
45b79749 276static int tracing_branch_open(struct inode *inode, struct file *file)
1f0d69a9
SR
277{
278 int ret;
279
280 ret = seq_open(file, &tracing_likely_seq_ops);
281 if (!ret) {
282 struct seq_file *m = file->private_data;
283 m->private = (void *)inode->i_private;
284 }
285
286 return ret;
287}
288
45b79749
SR
289static const struct file_operations tracing_branch_fops = {
290 .open = tracing_branch_open,
1f0d69a9
SR
291 .read = seq_read,
292 .llseek = seq_lseek,
293};
294
2bcd521a
SR
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/* Linker-script section bounds of every profiled (unannotated) branch. */
extern unsigned long __start_branch_profile[];
extern unsigned long __stop_branch_profile[];

/* Iterator descriptor for "profile_branch"; .hit selects miss/hit columns. */
static const struct ftrace_pointer ftrace_branch_pos = {
	.start			= __start_branch_profile,
	.stop			= __stop_branch_profile,
	.hit			= 1,
};

#endif /* CONFIG_PROFILE_ALL_BRANCHES */
306
45b79749
SR
/* Linker-script section bounds of the annotated likely()/unlikely() records. */
extern unsigned long __start_annotated_branch_profile[];
extern unsigned long __stop_annotated_branch_profile[];

/* Iterator descriptor for "profile_annotated_branch" (.hit stays 0). */
static const struct ftrace_pointer ftrace_annotated_branch_pos = {
	.start			= __start_annotated_branch_profile,
	.stop			= __stop_annotated_branch_profile,
};
314
9f029e83 315static __init int ftrace_branch_init(void)
1f0d69a9
SR
316{
317 struct dentry *d_tracer;
318 struct dentry *entry;
319
320 d_tracer = tracing_init_dentry();
321
45b79749 322 entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer,
0429149f 323 (void *)&ftrace_annotated_branch_pos,
45b79749 324 &tracing_branch_fops);
1f0d69a9 325 if (!entry)
45b79749
SR
326 pr_warning("Could not create debugfs "
327 "'profile_annotatet_branch' entry\n");
1f0d69a9 328
2bcd521a
SR
329#ifdef CONFIG_PROFILE_ALL_BRANCHES
330 entry = debugfs_create_file("profile_branch", 0444, d_tracer,
0429149f 331 (void *)&ftrace_branch_pos,
2bcd521a
SR
332 &tracing_branch_fops);
333 if (!entry)
334 pr_warning("Could not create debugfs"
335 " 'profile_branch' entry\n");
336#endif
337
1f0d69a9
SR
338 return 0;
339}
340
9f029e83 341device_initcall(ftrace_branch_init);
This page took 0.065252 seconds and 5 git commands to generate.