kernel/trace/trace_event_perf.c

/*
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

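/*
 * Illustrative sketch, not part of the original file: perf_trace_t is a
 * typedef for a fixed-size array of unsigned long, so each
 * alloc_percpu(perf_trace_t) below hands back PERF_MAX_TRACE_SIZE bytes
 * per CPU with unsigned long alignment.  Assuming PERF_MAX_TRACE_SIZE is
 * a multiple of sizeof(unsigned long), which perf_trace_buf_prepare()
 * asserts below, an equivalent explicit spelling plus a compile-time
 * sanity check could look like this (perf_trace_t_equiv and
 * perf_trace_t_check are hypothetical names):
 */
#if 0
typedef unsigned long perf_trace_t_equiv[PERF_MAX_TRACE_SIZE / sizeof(unsigned long)];

static inline void perf_trace_t_check(void)
{
	BUILD_BUG_ON(sizeof(perf_trace_t_equiv) != PERF_MAX_TRACE_SIZE);
	BUILD_BUG_ON(__alignof__(perf_trace_t_equiv) != __alignof__(unsigned long));
}
#endif
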
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;

static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

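/*
 * Illustrative sketch, not part of the original file: the gate above is
 * the usual perf paranoia check.  In kernels of this vintage,
 * perf_paranoid_tracepoint_raw() is defined in include/linux/perf_event.h
 * as a comparison against the perf_event_paranoid sysctl, roughly as
 * below (exact form may differ between versions):
 */
#if 0
static inline bool perf_paranoid_tracepoint_raw(void)
{
	return sysctl_perf_event_paranoid > -1;
}
#endif
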
static int perf_trace_event_init(struct ftrace_event_call *tp_event,
				 struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret;
	int cpu;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	/* Already set up for perf by another event; just take a reference */
	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	ret = -ENOMEM;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	/* The very first user also allocates the shared per-context buffers */
	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

int perf_trace_init(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event;
	int event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

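/*
 * Illustrative sketch, not part of this file: perf_trace_init() is not
 * called directly by users.  The tracepoint PMU's event_init callback in
 * the perf core (kernel/events/core.c, kernel/perf_event.c in older
 * trees) reaches it roughly like this (abridged, details vary by
 * version):
 */
#if 0
static int perf_tp_event_init(struct perf_event *event)
{
	int err;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	err = perf_trace_init(event);
	if (err)
		return err;

	/* perf_trace_destroy() below is reached through the destroy hook */
	event->destroy = tp_perf_event_destroy;

	return 0;
}
#endif
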
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return 0;
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	hlist_del_rcu(&p_event->hlist_entry);
}

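/*
 * Illustrative sketch, not part of this file: perf_trace_add() and
 * perf_trace_del() are wired up as the add/del methods of the tracepoint
 * PMU in the perf core, roughly as below (abridged, the field set varies
 * by version).  The flags argument follows the PMU method convention
 * that introduced PERF_EF_START and PERF_HES_STOPPED.
 */
#if 0
static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
};
#endif
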
void perf_trace_destroy(struct perf_event *p_event)
{
	struct ftrace_event_call *tp_event = p_event->tp_event;
	int i;

	mutex_lock(&event_mutex);
	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
	mutex_unlock(&event_mutex);
}

__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
				       struct pt_regs *regs, int *rctxp)
{
	struct trace_entry *entry;
	unsigned long flags;
	char *raw_data;
	int pc;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	pc = preempt_count();

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		return NULL;

	raw_data = this_cpu_ptr(perf_trace_buf[*rctxp]);

	/* zero the dead bytes from align to not leak stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));

	entry = (struct trace_entry *)raw_data;
	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;

	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
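
/*
 * Illustrative sketch, not part of this file: a typical caller of
 * perf_trace_buf_prepare() is the perf handler that include/trace/ftrace.h
 * generates for each tracepoint (perf_trace_##call).  Heavily abridged,
 * with a hypothetical event name and argument list, and noting that the
 * perf_trace_buf_submit() signature varies between kernel versions, the
 * pattern is roughly:
 */
#if 0
static notrace void perf_trace_sample_event(void *data, unsigned long arg1)
{
	struct ftrace_event_call *event_call = data;
	struct trace_entry *entry;
	struct pt_regs __regs;
	struct hlist_head *head;
	int entry_size, rctx;

	/* header plus tracepoint payload, padded to u64 (payload omitted here) */
	entry_size = ALIGN(sizeof(*entry) + sizeof(u32), sizeof(u64)) - sizeof(u32);

	perf_fetch_caller_regs(&__regs);

	entry = perf_trace_buf_prepare(entry_size, event_call->event.type,
				       &__regs, &rctx);
	if (!entry)
		return;

	/* ... fill in the tracepoint payload after the trace_entry header ... */

	head = this_cpu_ptr(event_call->perf_events);
	perf_trace_buf_submit(entry, entry_size, rctx, 0, 1, &__regs, head);
}
#endif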