/*
 * lttng-context-perf-counters.c
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <lttng/ust-tid.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include "lttng-tracer-core.h"
/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */
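
/*
 * Usage sketch (not part of this file): a caller holding the UST lock
 * would typically register a hardware counter context as follows. The
 * type/config pair comes from <linux/perf_event.h>; the field name and
 * context variable below are illustrative.
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_cpu_cycles", &session_ctx);
 */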
struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;
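
/*
 * Space reserved in the ring buffer for this context field: a single
 * uint64_t counter value, aligned on its natural alignment.
 */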
static
size_t perf_counter_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}
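
/*
 * Read an x86 performance monitoring counter directly from user-space
 * with the rdpmc instruction. The counter index to use is obtained from
 * the kernel through the perf mmap control page.
 */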
#if defined(__x86_64__) || defined(__i386__)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

#else /* defined(__x86_64__) || defined(__i386__) */

#error "Perf event counters are only supported on x86 so far."

#endif /* #else defined(__x86_64__) || defined(__i386__) */
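
/*
 * Read the current counter value through the perf mmap control page.
 * The kernel increments pc->lock around updates to the page (seqlock
 * scheme), so the read is retried whenever the sequence number changes
 * underneath us.
 */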
static
uint64_t read_perf_counter(struct perf_event_mmap_page *pc)
{
	uint32_t seq, idx;
	uint64_t count;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (idx)
			count = pc->offset + rdpmc(idx - 1);
		else
			count = 0;

		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}
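
/* perf_event_open(2) has no libc wrapper: invoke it through syscall(2). */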
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}
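
/*
 * Open a counting event for the current thread (pid 0) on any CPU and
 * map its control page. The file descriptor is closed right away; only
 * the mapped page is kept for rdpmc self-monitoring. Returns NULL on
 * error.
 */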
static
struct perf_event_mmap_page *setup_perf(struct perf_event_attr *attr)
{
	void *perf_addr;
	int fd, ret;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return NULL;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
	return perf_addr;
}
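
/* Tear down the control page mapping created by setup_perf(). */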
static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}
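
/*
 * Allocate the per-thread state on first use. All signals are blocked
 * while the thread-specific value is checked and set, so a tracing
 * signal handler running on this thread cannot race with a
 * half-initialized key.
 */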
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}
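
/*
 * Lazily create this thread's counter for a given context field and
 * link it on both the per-thread RCU list and the per-field list.
 * Signals are blocked for the same reason as above; the UST lock
 * protects the list updates against concurrent teardown.
 */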
static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->pc = setup_perf(&perf_field->attr);
	/* Note: thread_field->pc can be NULL if setup_perf() fails. */
	ust_lock_nocheck();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	ust_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}
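
/*
 * Fast-path lookup of the calling thread's counter for this field,
 * creating the per-thread state on first access.
 */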
static
struct lttng_perf_counter_thread_field *
		get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}
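
/* Read the counter value attached to a context field, for this thread. */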
static
uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = field->u.perf_counter;
	perf_thread_field = get_thread_field(perf_field);
	return read_perf_counter(perf_thread_field->pc);
}
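
/*
 * record callback: serialize the current counter value into the ring
 * buffer as the context field payload.
 */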
static
void perf_counter_record(struct lttng_ctx_field *field,
		 struct lttng_ust_lib_ring_buffer_ctx *ctx,
		 struct lttng_channel *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}
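
/* get_value callback: expose the counter value to the filter interpreter. */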
static
void perf_counter_get_value(struct lttng_ctx_field *field,
		union lttng_ctx_value *value)
{
	uint64_t v;

	v = wrapper_perf_counter_read(field);
	value->s64 = v;
}
/* Called with UST lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}
static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	ust_lock_nocheck();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	ust_unlock();
	free(perf_thread);
}
/* Called with UST lock held */
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field.name);
	perf_field = field->u.perf_counter;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period.
	 */
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	free(perf_field);
}
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
				uint64_t config,
				const char *name,
				struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event_mmap_page *tmp_pc;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size =
			sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment =
			lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness =
			lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = 1;
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->u.perf_counter = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	tmp_pc = setup_perf(&perf_field->attr);
	if (!tmp_pc) {
		ret = -ENODEV;
		goto setup_error;
	}
	unmap_perf_page(tmp_pc);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}
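
/*
 * Called at library init/exit: create and delete the pthread key under
 * which the per-thread counter state hangs. The key destructor tears
 * down a thread's counters when that thread exits.
 */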
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		return -ret;
	return 0;
}
void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}