/*
 * (C) Copyright 2009-2011 -
 *     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/perf_event.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include "ltt-events.h"
16 #include "wrapper/ringbuffer/frontend_types.h"
17 #include "wrapper/vmalloc.h"
18 #include "ltt-tracer.h"
21 size_t perf_counter_get_size(size_t offset
)
25 size
+= lib_ring_buffer_align(offset
, ltt_alignof(uint64_t));
26 size
+= sizeof(uint64_t);
31 void perf_counter_record(struct lttng_ctx_field
*field
,
32 struct lib_ring_buffer_ctx
*ctx
,
33 struct ltt_channel
*chan
)
35 struct perf_event
*event
;
38 event
= field
->u
.perf_counter
->e
[ctx
->cpu
];
40 if (unlikely(event
->state
== PERF_EVENT_STATE_ERROR
)) {
43 event
->pmu
->read(event
);
44 value
= local64_read(&event
->count
);
48 * Perf chooses not to be clever and not to support enabling a
49 * perf counter before the cpu is brought up. Therefore, we need
50 * to support having events coming (e.g. scheduler events)
51 * before the counter is setup. Write an arbitrary 0 in this
56 lib_ring_buffer_align_ctx(ctx
, ltt_alignof(value
));
57 chan
->ops
->event_write(ctx
, &value
, sizeof(value
));
/*
 * Perf overflow callback. Intentionally empty: the counter value is read
 * on demand in perf_counter_record(); overflow notifications are ignored.
 * NOTE(review): trailing struct pt_regs *regs parameter reconstructed to
 * match the perf_event_create_kernel_counter() callback signature of this
 * kernel era — confirm against the target kernel headers.
 */
static
void overflow_callback(struct perf_event *event, int nmi,
		       struct perf_sample_data *data,
		       struct pt_regs *regs)
{
}
68 void lttng_destroy_perf_counter_field(struct lttng_ctx_field
*field
)
70 struct perf_event
**events
= field
->u
.perf_counter
->e
;
74 for_each_online_cpu(cpu
)
75 perf_event_release_kernel(events
[cpu
]);
77 #ifdef CONFIG_HOTPLUG_CPU
78 unregister_cpu_notifier(&field
->u
.perf_counter
->nb
);
80 kfree(field
->event_field
.name
);
81 kfree(field
->u
.perf_counter
->attr
);
83 kfree(field
->u
.perf_counter
);
#ifdef CONFIG_HOTPLUG_CPU

/**
 *	lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 *	@nb: notifier block
 *	@action: hotplug action to take
 *	@hcpu: CPU number (as void *)
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 *	We can setup perf counters when the cpu is online (up prepare seems to be too
 *	early).
 */
static
int __cpuinit lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
						 unsigned long action,
						 void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	/* Ignore notifications until the context field is fully set up. */
	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
143 int lttng_add_perf_counter_to_ctx(uint32_t type
,
146 struct lttng_ctx
**ctx
)
148 struct lttng_ctx_field
*field
;
149 struct lttng_perf_counter_field
*perf_field
;
150 struct perf_event
**events
;
151 struct perf_event_attr
*attr
;
156 events
= kzalloc(num_possible_cpus() * sizeof(*events
), GFP_KERNEL
);
160 attr
= kzalloc(sizeof(struct perf_event_attr
), GFP_KERNEL
);
167 attr
->config
= config
;
168 attr
->size
= sizeof(struct perf_event_attr
);
172 perf_field
= kzalloc(sizeof(struct lttng_perf_counter_field
), GFP_KERNEL
);
175 goto error_alloc_perf_field
;
177 perf_field
->e
= events
;
178 perf_field
->attr
= attr
;
180 name_alloc
= kstrdup(name
, GFP_KERNEL
);
183 goto name_alloc_error
;
186 field
= lttng_append_context(ctx
);
189 goto append_context_error
;
191 if (lttng_find_context(*ctx
, name_alloc
)) {
196 #ifdef CONFIG_HOTPLUG_CPU
197 perf_field
->nb
.notifier_call
=
198 lttng_perf_counter_cpu_hp_callback
;
199 perf_field
->nb
.priority
= 0;
200 register_cpu_notifier(&perf_field
->nb
);
204 for_each_online_cpu(cpu
) {
205 events
[cpu
] = perf_event_create_kernel_counter(attr
,
206 cpu
, NULL
, overflow_callback
);
207 if (!events
[cpu
] || IS_ERR(events
[cpu
])) {
211 if (events
[cpu
]->state
== PERF_EVENT_STATE_ERROR
) {
218 field
->destroy
= lttng_destroy_perf_counter_field
;
220 field
->event_field
.name
= name_alloc
;
221 field
->event_field
.type
.atype
= atype_integer
;
222 field
->event_field
.type
.u
.basic
.integer
.size
= sizeof(unsigned long) * CHAR_BIT
;
223 field
->event_field
.type
.u
.basic
.integer
.alignment
= ltt_alignof(unsigned long) * CHAR_BIT
;
224 field
->event_field
.type
.u
.basic
.integer
.signedness
= is_signed_type(unsigned long);
225 field
->event_field
.type
.u
.basic
.integer
.reverse_byte_order
= 0;
226 field
->event_field
.type
.u
.basic
.integer
.base
= 10;
227 field
->event_field
.type
.u
.basic
.integer
.encoding
= lttng_encode_none
;
228 field
->get_size
= perf_counter_get_size
;
229 field
->record
= perf_counter_record
;
230 field
->u
.perf_counter
= perf_field
;
231 perf_field
->hp_enable
= 1;
233 wrapper_vmalloc_sync_all();
238 for_each_online_cpu(cpu
) {
239 if (events
[cpu
] && !IS_ERR(events
[cpu
]))
240 perf_event_release_kernel(events
[cpu
]);
243 #ifdef CONFIG_HOTPLUG_CPU
244 unregister_cpu_notifier(&perf_field
->nb
);
247 lttng_remove_context_field(ctx
, field
);
248 append_context_error
:
252 error_alloc_perf_field
:
259 MODULE_LICENSE("GPL and additional rights");
260 MODULE_AUTHOR("Mathieu Desnoyers");
261 MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");