Fix build and load against linux-2.6.33.x
[deliverable/lttng-modules.git] / lttng-context-perf-counters.c
CommitLineData
833ad6a0 1/*
886d51a3 2 * lttng-context-perf-counters.c
833ad6a0
MD
3 *
4 * LTTng performance monitoring counters (perf-counters) integration module.
5 *
886d51a3
MD
6 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
833ad6a0
MD
21 */
22
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/perf_event.h>
26#include <linux/list.h>
c24a0d71 27#include <linux/string.h>
5ca7b8a3 28#include <linux/cpu.h>
a90917c3 29#include "lttng-events.h"
c24a0d71
MD
30#include "wrapper/ringbuffer/frontend_types.h"
31#include "wrapper/vmalloc.h"
90f5546c 32#include "wrapper/perf.h"
a90917c3 33#include "lttng-tracer.h"
833ad6a0 34
f1676205
MD
35static
36size_t perf_counter_get_size(size_t offset)
37{
38 size_t size = 0;
39
a90917c3 40 size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
f1676205
MD
41 size += sizeof(uint64_t);
42 return size;
43}
44
833ad6a0
MD
45static
46void perf_counter_record(struct lttng_ctx_field *field,
47 struct lib_ring_buffer_ctx *ctx,
a90917c3 48 struct lttng_channel *chan)
833ad6a0
MD
49{
50 struct perf_event *event;
51 uint64_t value;
52
2001023e 53 event = field->u.perf_counter->e[ctx->cpu];
0478c519 54 if (likely(event)) {
7b745a96
MD
55 if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
56 value = 0;
57 } else {
58 event->pmu->read(event);
59 value = local64_read(&event->count);
60 }
f91fd73b
MD
61 } else {
62 /*
63 * Perf chooses not to be clever and not to support enabling a
64 * perf counter before the cpu is brought up. Therefore, we need
65 * to support having events coming (e.g. scheduler events)
66 * before the counter is setup. Write an arbitrary 0 in this
67 * case.
68 */
69 value = 0;
70 }
a90917c3 71 lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
833ad6a0
MD
72 chan->ops->event_write(ctx, &value, sizeof(value));
73}
74
90f5546c
MD
/*
 * Dummy overflow handler: counters are read on demand from the tracing
 * probe (perf_counter_record), so overflow notifications are ignored.
 * The callback signature dropped its 'nmi' argument around kernel 3.1,
 * hence the version check below.
 */
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
#endif
833ad6a0 90
2dccf128
MD
/*
 * Tear down a perf counter context field: release the per-CPU perf
 * events, unregister the CPU hotplug notifier, and free all memory
 * owned by the field (name, attr, event array, perf_counter struct).
 *
 * NOTE(review): counters are released before the hotplug notifier is
 * unregistered, and hp_enable is still set at that point — presumably
 * callers guarantee no CPU hotplug transition races with destruction;
 * confirm against the session teardown path.
 */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;
	int cpu;

	/* Hold off hotplug so the online-CPU set is stable while we release. */
	get_online_cpus();
	for_each_online_cpu(cpu)
		perf_event_release_kernel(events[cpu]);
	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	kfree(field->event_field.name);
	kfree(field->u.perf_counter->attr);
	kfree(events);
	kfree(field->u.perf_counter);
}
109
8289661d
MD
#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	/* Ignore notifications until the field is fully set up. */
	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		/* A counter in the error state is unusable: drop it. */
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
166
833ad6a0
MD
/**
 * lttng_add_perf_counter_to_ctx - append a perf counter field to a context
 * @type: perf event attribute type (attr->type)
 * @config: perf event configuration (attr->config, counter selector)
 * @name: context field name, duplicated into the field
 * @ctx: tracing context to append the field to
 *
 * Allocates a per-possible-CPU array of perf events, creates a pinned
 * kernel counter on each online CPU, and registers a CPU hotplug
 * notifier so counters follow later CPU online/offline transitions.
 *
 * Returns 0 on success, or a negative error code: -ENOMEM on allocation
 * failure, -EEXIST if a field named @name already exists in @ctx,
 * -EINVAL if counter creation fails, -EBUSY if a counter comes up in
 * the error state.
 */
int lttng_add_perf_counter_to_ctx(uint32_t type,
				  uint64_t config,
				  const char *name,
				  struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	int cpu;
	char *name_alloc;

	events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;	/* keep the counter on the PMU at all times */
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	/*
	 * Register the notifier before creating the counters; it stays
	 * inert until hp_enable is set at the end of setup.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	perf_field->nb.notifier_call =
		lttng_perf_counter_cpu_hp_callback;
	perf_field->nb.priority = 0;
	register_cpu_notifier(&perf_field->nb);
#endif

	get_online_cpus();
	for_each_online_cpu(cpu) {
		events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!events[cpu] || IS_ERR(events[cpu])) {
			ret = -EINVAL;
			goto counter_error;
		}
		if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
			ret = -EBUSY;
			goto counter_busy;
		}
	}
	put_online_cpus();

	field->destroy = lttng_destroy_perf_counter_field;

	/* The recorded value is a 64-bit unsigned integer, base 10. */
	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	perf_field->hp_enable = 1;	/* arm the hotplug callback last */

	wrapper_vmalloc_sync_all();
	return 0;

counter_busy:
counter_error:
	/* Release only the counters successfully created above. */
	for_each_online_cpu(cpu) {
		if (events[cpu] && !IS_ERR(events[cpu]))
			perf_event_release_kernel(events[cpu]);
	}
	put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&perf_field->nb);
#endif
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	kfree(events);
	return ret;
}
282
833ad6a0
MD
/* Module metadata. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");
This page took 0.049973 seconds and 5 git commands to generate.