perf/tracing: Fix regression of perf losing kprobe events
kernel/trace/trace_event_perf.c
/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);

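/*
 * One scratch buffer per perf recursion context (task, softirq, hardirq,
 * NMI); perf_swevent_get_recursion_context() picks the index at use time.
 */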
static char *perf_trace_buf[4];

/*
 * Force it to be aligned to unsigned long to avoid misaligned accesses
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char *buf;
		int i;

		for (i = 0; i < 4; i++) {
			buf = (char *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

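	/*
	 * Event classes that provide a ->reg() callback (e.g. kprobe-based
	 * events) are registered through it; plain tracepoints fall back to
	 * attaching the generic perf probe directly.
	 */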
	if (tp_event->class->reg)
		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	else
		ret = tracepoint_probe_register(tp_event->name,
						tp_event->class->perf_probe,
						tp_event);

	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
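	/*
	 * Unwind a possibly partial buffer allocation: free_percpu()
	 * accepts NULL, so slots that were never allocated are skipped
	 * safely.
	 */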
	if (!total_ref_count) {
		int i;

		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
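		/*
		 * Match on ->perf_probe OR ->reg: kprobe-based events only
		 * provide ->reg(), and skipping them here is the regression
		 * this change fixes.
		 */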
		if (tp_event->event.type == event_id &&
		    tp_event->class &&
		    (tp_event->class->perf_probe ||
		     tp_event->class->reg) &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

int perf_trace_enable(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head *list;

	list = tp_event->perf_events;
	if (WARN_ON_ONCE(!list))
		return -EINVAL;

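	/*
	 * Add the event to this CPU's RCU hlist, so a firing tracepoint
	 * only walks the events active on the local CPU.
	 */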
	list = this_cpu_ptr(list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

void perf_trace_disable(struct perf_event *p_event)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	if (tp_event->class->reg)
		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
	else
		tracepoint_probe_unregister(tp_event->name,
					    tp_event->class->perf_probe,
					    tp_event);

	/*
	 * Ensure our callback won't be called anymore. See
	 * tracepoint_probe_unregister() and __DO_TRACE().
	 */
	synchronize_sched();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

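	/*
	 * The scratch buffers are shared across all event classes; only
	 * free them once the last perf trace event in the system is gone.
	 */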
	if (!--total_ref_count) {
		for (i = 0; i < 4; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	mutex_unlock(&event_mutex);
}

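/*
 * Grab a per-cpu raw-data buffer and initialize its trace_entry header.
 * A caller (sketch, modeled on the perf_trace_##call template) looks
 * roughly like:
 *
 *	entry = perf_trace_buf_prepare(size, event_type, regs, &rctx);
 *	if (!entry)
 *		return;
 *	... fill in event-specific fields ...
 *	perf_trace_buf_submit(entry, size, rctx, addr, count, regs, head);
 */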
__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);