x86, bts: correct comment style in ds.c
[deliverable/linux.git] / kernel / trace / trace_hw_branches.c
CommitLineData
1e9b51c2 1/*
a93751ca 2 * h/w branch tracer for x86 based on bts
1e9b51c2 3 *
5c5317de
MM
4 * Copyright (C) 2008-2009 Intel Corporation.
5 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
1e9b51c2 6 */
2d542cf3
IM
7#include <linux/spinlock.h>
8#include <linux/kallsyms.h>
1e9b51c2
MM
9#include <linux/debugfs.h>
10#include <linux/ftrace.h>
2d542cf3 11#include <linux/module.h>
5c5317de
MM
12#include <linux/cpu.h>
13#include <linux/smp.h>
2d542cf3 14#include <linux/fs.h>
1e9b51c2
MM
15
16#include <asm/ds.h>
17
18#include "trace.h"
f0868d1e 19#include "trace_output.h"
1e9b51c2
MM
20
21
22#define SIZEOF_BTS (1 << 13)
23
2d542cf3
IM
24/*
25 * The tracer lock protects the below per-cpu tracer array.
26 * It needs to be held to:
27 * - start tracing on all cpus
28 * - stop tracing on all cpus
29 * - start tracing on a single hotplug cpu
30 * - stop tracing on a single hotplug cpu
31 * - read the trace from all cpus
32 * - read the trace from a single cpu
33 */
34static DEFINE_SPINLOCK(bts_tracer_lock);
1e9b51c2
MM
35static DEFINE_PER_CPU(struct bts_tracer *, tracer);
36static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
37
38#define this_tracer per_cpu(tracer, smp_processor_id())
39#define this_buffer per_cpu(buffer, smp_processor_id())
40
5c5317de 41static int __read_mostly trace_hw_branches_enabled;
b1818748 42static struct trace_array *hw_branch_trace __read_mostly;
1e9b51c2 43
5c5317de
MM
44
45/*
46 * Start tracing on the current cpu.
47 * The argument is ignored.
48 *
2d542cf3 49 * pre: bts_tracer_lock must be locked.
5c5317de 50 */
1e9b51c2
MM
51static void bts_trace_start_cpu(void *arg)
52{
a93751ca
MM
53 if (this_tracer)
54 ds_release_bts(this_tracer);
55
1e9b51c2
MM
56 this_tracer =
57 ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
a93751ca
MM
58 /* ovfl = */ NULL, /* th = */ (size_t)-1,
59 BTS_KERNEL);
1e9b51c2
MM
60 if (IS_ERR(this_tracer)) {
61 this_tracer = NULL;
62 return;
63 }
1e9b51c2
MM
64}
65
66static void bts_trace_start(struct trace_array *tr)
67{
2d542cf3 68 spin_lock(&bts_tracer_lock);
1e9b51c2 69
5c5317de
MM
70 on_each_cpu(bts_trace_start_cpu, NULL, 1);
71 trace_hw_branches_enabled = 1;
1e9b51c2 72
2d542cf3 73 spin_unlock(&bts_tracer_lock);
1e9b51c2
MM
74}
75
5c5317de 76/*
c3706f00 77 * Stop tracing on the current cpu.
5c5317de
MM
78 * The argument is ignored.
79 *
2d542cf3 80 * pre: bts_tracer_lock must be locked.
5c5317de 81 */
1e9b51c2
MM
82static void bts_trace_stop_cpu(void *arg)
83{
84 if (this_tracer) {
1e9b51c2
MM
85 ds_release_bts(this_tracer);
86 this_tracer = NULL;
87 }
88}
89
90static void bts_trace_stop(struct trace_array *tr)
91{
2d542cf3 92 spin_lock(&bts_tracer_lock);
5c5317de
MM
93
94 trace_hw_branches_enabled = 0;
95 on_each_cpu(bts_trace_stop_cpu, NULL, 1);
1e9b51c2 96
2d542cf3 97 spin_unlock(&bts_tracer_lock);
5c5317de
MM
98}
99
100static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
101 unsigned long action, void *hcpu)
102{
103 unsigned int cpu = (unsigned long)hcpu;
104
2d542cf3 105 spin_lock(&bts_tracer_lock);
5c5317de
MM
106
107 if (!trace_hw_branches_enabled)
108 goto out;
109
110 switch (action) {
111 case CPU_ONLINE:
112 case CPU_DOWN_FAILED:
113 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
114 break;
115 case CPU_DOWN_PREPARE:
1e9b51c2 116 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
5c5317de
MM
117 break;
118 }
119
120 out:
2d542cf3 121 spin_unlock(&bts_tracer_lock);
5c5317de 122 return NOTIFY_DONE;
1e9b51c2
MM
123}
124
5c5317de
MM
125static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
126 .notifier_call = bts_hotcpu_handler
127};
128
5e01cb69 129static int bts_trace_init(struct trace_array *tr)
1e9b51c2 130{
b1818748
MM
131 hw_branch_trace = tr;
132
1e9b51c2
MM
133 bts_trace_start(tr);
134
135 return 0;
136}
137
5e01cb69 138static void bts_trace_reset(struct trace_array *tr)
5c5317de
MM
139{
140 bts_trace_stop(tr);
5c5317de
MM
141}
142
1e9b51c2
MM
/* Print the column header for the trace output. */
static void bts_trace_print_header(struct seq_file *m)
{
	seq_puts(m, "# CPU# TO <- FROM\n");
}
147
148static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
149{
150 struct trace_entry *entry = iter->ent;
151 struct trace_seq *seq = &iter->seq;
a93751ca 152 struct hw_branch_entry *it;
11edda06 153 unsigned long symflags = TRACE_ITER_SYM_OFFSET;
1e9b51c2
MM
154
155 trace_assign_type(it, entry);
156
a93751ca 157 if (entry->type == TRACE_HW_BRANCHES) {
1830b52d 158 if (trace_seq_printf(seq, "%4d ", iter->cpu) &&
11edda06
MM
159 seq_print_ip_sym(seq, it->to, symflags) &&
160 trace_seq_printf(seq, "\t <- ") &&
161 seq_print_ip_sym(seq, it->from, symflags) &&
a93751ca
MM
162 trace_seq_printf(seq, "\n"))
163 return TRACE_TYPE_HANDLED;
164 return TRACE_TYPE_PARTIAL_LINE;;
1e9b51c2
MM
165 }
166 return TRACE_TYPE_UNHANDLED;
167}
168
b1818748 169void trace_hw_branch(u64 from, u64 to)
1e9b51c2 170{
b1818748 171 struct trace_array *tr = hw_branch_trace;
1e9b51c2 172 struct ring_buffer_event *event;
a93751ca 173 struct hw_branch_entry *entry;
0a987751 174 unsigned long irq1;
5c5317de 175 int cpu;
1e9b51c2 176
5c5317de
MM
177 if (unlikely(!tr))
178 return;
179
180 if (unlikely(!trace_hw_branches_enabled))
1e9b51c2 181 return;
5c5317de
MM
182
183 local_irq_save(irq1);
184 cpu = raw_smp_processor_id();
185 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
186 goto out;
187
51a763dd
ACM
188 event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES,
189 sizeof(*entry), 0, 0);
5c5317de
MM
190 if (!event)
191 goto out;
1e9b51c2
MM
192 entry = ring_buffer_event_data(event);
193 tracing_generic_entry_update(&entry->ent, 0, from);
a93751ca 194 entry->ent.type = TRACE_HW_BRANCHES;
1e9b51c2
MM
195 entry->from = from;
196 entry->to = to;
51a763dd 197 trace_buffer_unlock_commit(tr, event, 0, 0);
5c5317de
MM
198
199 out:
200 atomic_dec(&tr->data[cpu]->disabled);
201 local_irq_restore(irq1);
1e9b51c2
MM
202}
203
b1818748 204static void trace_bts_at(const struct bts_trace *trace, void *at)
1e9b51c2 205{
a93751ca
MM
206 struct bts_struct bts;
207 int err = 0;
1e9b51c2 208
a93751ca
MM
209 WARN_ON_ONCE(!trace->read);
210 if (!trace->read)
1e9b51c2
MM
211 return;
212
a93751ca
MM
213 err = trace->read(this_tracer, at, &bts);
214 if (err < 0)
215 return;
1e9b51c2 216
a93751ca
MM
217 switch (bts.qualifier) {
218 case BTS_BRANCH:
b1818748 219 trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to);
a93751ca
MM
220 break;
221 }
1e9b51c2
MM
222}
223
5c5317de
MM
224/*
225 * Collect the trace on the current cpu and write it into the ftrace buffer.
226 *
2d542cf3 227 * pre: bts_tracer_lock must be locked
5c5317de 228 */
1e9b51c2
MM
229static void trace_bts_cpu(void *arg)
230{
231 struct trace_array *tr = (struct trace_array *) arg;
a93751ca
MM
232 const struct bts_trace *trace;
233 unsigned char *at;
1e9b51c2 234
b1818748 235 if (unlikely(!tr))
1e9b51c2
MM
236 return;
237
5c5317de
MM
238 if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
239 return;
240
b1818748
MM
241 if (unlikely(!this_tracer))
242 return;
243
a93751ca
MM
244 ds_suspend_bts(this_tracer);
245 trace = ds_read_bts(this_tracer);
246 if (!trace)
1e9b51c2
MM
247 goto out;
248
a93751ca
MM
249 for (at = trace->ds.top; (void *)at < trace->ds.end;
250 at += trace->ds.size)
b1818748 251 trace_bts_at(trace, at);
1e9b51c2 252
a93751ca
MM
253 for (at = trace->ds.begin; (void *)at < trace->ds.top;
254 at += trace->ds.size)
b1818748 255 trace_bts_at(trace, at);
1e9b51c2
MM
256
257out:
a93751ca 258 ds_resume_bts(this_tracer);
1e9b51c2
MM
259}
260
261static void trace_bts_prepare(struct trace_iterator *iter)
262{
2d542cf3 263 spin_lock(&bts_tracer_lock);
5c5317de
MM
264
265 on_each_cpu(trace_bts_cpu, iter->tr, 1);
1e9b51c2 266
2d542cf3 267 spin_unlock(&bts_tracer_lock);
1e9b51c2
MM
268}
269
e23b8ad8
MM
270static void trace_bts_close(struct trace_iterator *iter)
271{
272 tracing_reset_online_cpus(iter->tr);
273}
274
b1818748
MM
275void trace_hw_branch_oops(void)
276{
2d542cf3 277 spin_lock(&bts_tracer_lock);
b1818748
MM
278
279 trace_bts_cpu(hw_branch_trace);
280
2d542cf3 281 spin_unlock(&bts_tracer_lock);
b1818748
MM
282}
283
1e9b51c2
MM
284struct tracer bts_tracer __read_mostly =
285{
a93751ca 286 .name = "hw-branch-tracer",
1e9b51c2 287 .init = bts_trace_init,
5c5317de 288 .reset = bts_trace_reset,
1e9b51c2
MM
289 .print_header = bts_trace_print_header,
290 .print_line = bts_trace_print_line,
291 .start = bts_trace_start,
292 .stop = bts_trace_stop,
e23b8ad8
MM
293 .open = trace_bts_prepare,
294 .close = trace_bts_close
1e9b51c2
MM
295};
296
297__init static int init_bts_trace(void)
298{
5e01cb69 299 register_hotcpu_notifier(&bts_hotcpu_notifier);
1e9b51c2
MM
300 return register_tracer(&bts_tracer);
301}
302device_initcall(init_bts_trace);
This page took 0.050982 seconds and 5 git commands to generate.