tracing: Remove duplicate id information in event structure
kernel/trace/trace_event_perf.c
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

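/*
 * Per-cpu scratch buffers for assembling trace records before handing
 * them to perf: one for ordinary contexts and a separate one for NMI
 * context, so an NMI cannot corrupt a record being built beneath it.
 */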
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

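/*
 * Enable perf profiling for one event: allocate the shared per-cpu
 * buffers when the first event is enabled, then register the event's
 * perf probe (through ->reg() when the class provides one, otherwise
 * as a plain tracepoint probe).
 */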
static int perf_trace_event_enable(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

	if (event->perf_refcount++ > 0)
		return 0;

	if (!total_ref_count) {
		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(perf_trace_buf, buf);

		buf = (char *)alloc_percpu(perf_trace_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(perf_trace_buf_nmi, buf);
	}

	if (event->class->reg)
		ret = event->class->reg(event, TRACE_REG_PERF_REGISTER);
	else
		ret = tracepoint_probe_register(event->name,
						event->class->perf_probe,
						event);
	if (!ret) {
		total_ref_count++;
		return 0;
	}

fail_buf_nmi:
	if (!total_ref_count) {
		free_percpu(perf_trace_buf_nmi);
		free_percpu(perf_trace_buf);
		perf_trace_buf_nmi = NULL;
		perf_trace_buf = NULL;
	}
fail_buf:
	event->perf_refcount--;

	return ret;
}

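/*
 * Look up the event by id under event_mutex and enable it, pinning the
 * owning module so it cannot unload while profiling is active.
 */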
int perf_trace_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->event.type == event_id &&
		    event->class && event->class->perf_probe &&
		    try_module_get(event->mod)) {
			ret = perf_trace_event_enable(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

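/*
 * Drop one reference on the event; on the last one, unregister the perf
 * probe and, once no event at all is left in use, free the shared
 * buffers after waiting for in-flight users to finish.
 */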
static void perf_trace_event_disable(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

	if (--event->perf_refcount > 0)
		return;

	if (event->class->reg)
		event->class->reg(event, TRACE_REG_PERF_UNREGISTER);
	else
		tracepoint_probe_unregister(event->name, event->class->perf_probe, event);

	if (!--total_ref_count) {
		buf = perf_trace_buf;
		rcu_assign_pointer(perf_trace_buf, NULL);

		nmi_buf = perf_trace_buf_nmi;
		rcu_assign_pointer(perf_trace_buf_nmi, NULL);

		/*
		 * Ensure all events in profiling have finished before
		 * releasing the buffers
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

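/*
 * Counterpart of perf_trace_enable(): find the event by id, disable it
 * and release the module reference taken at enable time.
 */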
void perf_trace_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->event.type == event_id) {
			perf_trace_event_disable(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}

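/*
 * Reserve this cpu's scratch buffer for one trace record: disable IRQs,
 * take a recursion context, pick the normal or NMI buffer, and write the
 * common trace_entry header. Returns the record, or NULL on recursion or
 * when the buffers are gone; on success the caller must submit the record
 * and then release rctx and irq_flags.
 */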
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference_sched(perf_trace_buf);

	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from alignment to avoid leaking stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;
err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
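
/*
 * Illustrative sketch, not part of the original file: how a generated
 * perf probe of this era pairs perf_trace_buf_prepare() with its
 * companion submit helper. The function name, parameters and sizes
 * below are hypothetical; perf_trace_buf_submit() comes from the trace
 * event headers of this kernel, and its exact signature here is an
 * assumption.
 */
static notrace void perf_trace_example_probe(struct pt_regs *regs, u64 addr,
					     u64 count, int event_type,
					     int size)
{
	struct trace_entry *entry;
	unsigned long irq_flags;
	int rctx;

	/* Reserve per-cpu scratch space; handles recursion and disables IRQs */
	entry = perf_trace_buf_prepare(size, event_type, &rctx, &irq_flags);
	if (!entry)
		return;

	/* A real probe would assign its event-specific fields here */

	/* Hand the record to perf, then undo what prepare() set up */
	perf_trace_buf_submit(entry, size, rctx, addr, count, irq_flags, regs);
}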