/*
 * lttng-events.c
 *
 * Holds LTTng per-session event registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
#include <urcu/list.h>
#include <urcu/hlist.h>
#include <pthread.h>
#include <errno.h>
#include <sys/shm.h>
#include <sys/ipc.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <time.h>
#include <stdbool.h>
#include <lttng/ust-endian.h>
#include "clock.h"

#include <urcu-bp.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <urcu/arch.h>

#include <lttng/tracepoint.h>
#include <lttng/ust-events.h>

#include <usterr-signal-safe.h>
#include <helper.h>
#include <lttng/ust-ctl.h>
#include <ust-comm.h>
#include <lttng/ust-dynamic-type.h>
#include <lttng/ust-context-provider.h>
#include "error.h"
#include "compat.h"
#include "lttng-ust-uuid.h"

#include "tracepoint-internal.h"
#include "string-utils.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
#include "lttng-ust-statedump.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"

/*
 * All operations within this file are called by the communication
 * thread, under ust_lock protection.
 */

static CDS_LIST_HEAD(sessions);

struct cds_list_head *_lttng_get_sessions(void)
{
	return &sessions;
}

static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_enum_destroy(struct lttng_enum *_enum);

static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static
void lttng_session_sync_enablers(struct lttng_session *session);
static
void lttng_enabler_destroy(struct lttng_enabler *enabler);

/*
 * Called with ust lock held.
 */
int lttng_session_active(void)
{
	struct lttng_session *iter;

	cds_list_for_each_entry(iter, &sessions, node) {
		if (iter->active)
			return 1;
	}
	return 0;
}

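/*
 * Loglevel matching helper. Note: lower numerical loglevel values are
 * assumed to denote higher severity (TRACE_EMERG is 0, TRACE_DEBUG the
 * least severe). RANGE therefore matches events at least as severe as
 * the requested loglevel, SINGLE matches the exact loglevel, and a
 * requested loglevel of -1 matches all loglevels.
 */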
static
int lttng_loglevel_match(int loglevel,
		unsigned int has_loglevel,
		enum lttng_ust_loglevel_type req_type,
		int req_loglevel)
{
	if (!has_loglevel)
		loglevel = TRACE_DEFAULT;
	switch (req_type) {
	case LTTNG_UST_LOGLEVEL_RANGE:
		if (loglevel <= req_loglevel
				|| (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_SINGLE:
		if (loglevel == req_loglevel
				|| (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_ALL:
	default:
		if (loglevel <= TRACE_DEBUG)
			return 1;
		else
			return 0;
	}
}

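/*
 * Wait for a liburcu (urcu-bp) grace period, so that no probe callback
 * can still be running before event or probe memory is released.
 */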
void synchronize_trace(void)
{
	synchronize_rcu();
}

struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	int i;

	session = zmalloc(sizeof(struct lttng_session));
	if (!session)
		return NULL;
	if (lttng_session_context_init(&session->ctx)) {
		free(session);
		return NULL;
	}
	CDS_INIT_LIST_HEAD(&session->chan_head);
	CDS_INIT_LIST_HEAD(&session->events_head);
	CDS_INIT_LIST_HEAD(&session->enums_head);
	CDS_INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
		CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
	for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
		CDS_INIT_HLIST_HEAD(&session->enums_ht.table[i]);
	cds_list_add(&session->node, &sessions);
	return session;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle.
	 */
	channel_destroy(chan, handle, 0);
}

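/*
 * Probe (un)registration goes through the "queue release" tracepoint
 * API: memory of replaced probe arrays is expected to be reclaimed only
 * after synchronize_trace() followed by
 * __tracepoint_probe_prune_release_queue(), as done at session
 * destruction and probe provider unregistration below.
 */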
static
void register_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 0);
	desc = event->desc;
	ret = __tracepoint_probe_register_queue_release(desc->name,
			desc->probe_callback,
			event, desc->signature);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 1;
}

static
void unregister_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 1);
	desc = event->desc;
	ret = __tracepoint_probe_unregister_queue_release(desc->name,
			desc->probe_callback,
			event);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 0;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_unregister(struct lttng_event *event)
{
	if (event->registered)
		unregister_event(event);
}

void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enum *_enum, *tmp_enum;
	struct lttng_enabler *enabler, *tmpenabler;

	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	__tracepoint_probe_prune_release_queue();
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(_enum, tmp_enum,
			&session->enums_head, node)
		_lttng_enum_destroy(_enum);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	cds_list_del(&session->node);
	lttng_destroy_context(session->ctx);
	free(session);
}

static
int lttng_enum_create(const struct lttng_enum_desc *desc,
		struct lttng_session *session)
{
	const char *enum_name = desc->name;
	struct lttng_enum *_enum;
	struct cds_hlist_head *head;
	int ret = 0;
	size_t name_len = strlen(enum_name);
	uint32_t hash;
	int notify_socket;

	/* Check if this enum is already registered for this session. */
	hash = jhash(enum_name, name_len, 0);
	head = &session->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];

	_enum = lttng_ust_enum_get_from_desc(session, desc);
	if (_enum) {
		ret = -EEXIST;
		goto exist;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	_enum = zmalloc(sizeof(*_enum));
	if (!_enum) {
		ret = -ENOMEM;
		goto cache_error;
	}
	_enum->session = session;
	_enum->desc = desc;

	ret = ustcomm_register_enum(notify_socket,
			session->objd,
			enum_name,
			desc->nr_entries,
			desc->entries,
			&_enum->id);
	if (ret < 0) {
		DBG("Error (%d) registering enumeration to sessiond", ret);
		goto sessiond_register_error;
	}
	cds_list_add(&_enum->node, &session->enums_head);
	cds_hlist_add_head(&_enum->hlist, head);
	return 0;

sessiond_register_error:
	free(_enum);
cache_error:
socket_error:
exist:
	return ret;
}

static
int lttng_create_enum_check(const struct lttng_type *type,
		struct lttng_session *session)
{
	switch (type->atype) {
	case atype_enum:
	{
		const struct lttng_enum_desc *enum_desc;
		int ret;

		enum_desc = type->u.basic.enumeration.desc;
		ret = lttng_enum_create(enum_desc, session);
		if (ret && ret != -EEXIST) {
			DBG("Unable to create enum error: (%d)", ret);
			return ret;
		}
		break;
	}
	case atype_dynamic:
	{
		const struct lttng_event_field *tag_field_generic;
		const struct lttng_enum_desc *enum_desc;
		int ret;

		tag_field_generic = lttng_ust_dynamic_type_tag_field();
		enum_desc = tag_field_generic->type.u.basic.enumeration.desc;
		ret = lttng_enum_create(enum_desc, session);
		if (ret && ret != -EEXIST) {
			DBG("Unable to create enum error: (%d)", ret);
			return ret;
		}
		break;
	}
	default:
		/* TODO: nested types when they become supported. */
		break;
	}
	return 0;
}

static
int lttng_create_all_event_enums(size_t nr_fields,
		const struct lttng_event_field *event_fields,
		struct lttng_session *session)
{
	size_t i;
	int ret;

	/* For each field, ensure enum is part of the session. */
	for (i = 0; i < nr_fields; i++) {
		const struct lttng_type *type = &event_fields[i].type;

		ret = lttng_create_enum_check(type, session);
		if (ret)
			return ret;
	}
	return 0;
}

static
int lttng_create_all_ctx_enums(size_t nr_fields,
		const struct lttng_ctx_field *ctx_fields,
		struct lttng_session *session)
{
	size_t i;
	int ret;

	/* For each field, ensure enum is part of the session. */
	for (i = 0; i < nr_fields; i++) {
		const struct lttng_type *type = &ctx_fields[i].event_field.type;

		ret = lttng_create_enum_check(type, session);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * Ensure that a state-dump will be performed for this session at the end
 * of the current handle_message().
 */
int lttng_session_statedump(struct lttng_session *session)
{
	session->statedump_pending = 1;
	lttng_ust_sockinfo_session_enabled(session->owner);
	return 0;
}

int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	int notify_socket;

	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0)
		return notify_socket;

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	cds_list_for_each_entry(chan, &session->chan_head, node) {
		const struct lttng_ctx *ctx;
		const struct lttng_ctx_field *fields = NULL;
		size_t nr_fields = 0;
		uint32_t chan_id;

		/* Don't change the header type if the session is stopped and restarted. */
		if (chan->header_type)
			continue;
		ctx = chan->ctx;
		if (ctx) {
			nr_fields = ctx->nr_fields;
			fields = ctx->fields;
			ret = lttng_create_all_ctx_enums(nr_fields, fields,
					session);
			if (ret < 0) {
				DBG("Error (%d) adding enum to session", ret);
				return ret;
			}
		}
		ret = ustcomm_register_channel(notify_socket,
				session,
				session->objd,
				chan->objd,
				nr_fields,
				fields,
				&chan_id,
				&chan->header_type);
		if (ret) {
			DBG("Error (%d) registering channel to sessiond", ret);
			return ret;
		}
		if (chan_id != chan->id) {
			DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
				chan_id, chan->id);
			return -EINVAL;
		}
	}

	/* Set atomically the state to "active" */
	CMM_ACCESS_ONCE(session->active) = 1;
	CMM_ACCESS_ONCE(session->been_active) = 1;

	ret = lttng_session_statedump(session);
	if (ret)
		return ret;
end:
	return ret;
}

int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;

	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "inactive" */
	CMM_ACCESS_ONCE(session->active) = 0;

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);
end:
	return ret;
}

int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	if (channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 1;
end:
	return ret;
}

int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}

/*
 * Supports event creation while tracing session is active.
 */
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

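	/*
	 * Bucket lookup below assumes LTTNG_UST_EVENT_HT_SIZE is a power of
	 * two, so that masking the jhash value with (size - 1) is equivalent
	 * to a modulo on the table size.
	 */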
	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
			session);
	if (ret < 0) {
		DBG("Error (%d) adding enum to session", ret);
		goto create_enum_error;
	}

	/*
	 * Check if loglevel match. Refuse to connect event if not.
	 */
	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
			session,
			session->objd,
			chan->objd,
			event_name,
			loglevel,
			desc->signature,
			desc->nr_fields,
			desc->fields,
			uri,
			&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	/* Populate lttng_event structure before tracepoint registration. */
	cmm_smp_wmb();
	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

sessiond_register_error:
	free(event);
cache_error:
create_enum_error:
socket_error:
	return ret;
}

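/*
 * strutils_star_glob_match() takes explicit lengths; SIZE_MAX is assumed
 * to mean "up to the terminating NUL", so both the enabler pattern and
 * the event name are treated as nul-terminated strings here.
 */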
static
int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_STAR_GLOB);
	if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
			desc->name, SIZE_MAX))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_EVENT);
	if (strcmp(desc->name, enabler->event_param.name))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	switch (enabler->type) {
	case LTTNG_ENABLER_STAR_GLOB:
	{
		struct lttng_ust_excluder_node *excluder;

		if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
			return 0;
		}

		/*
		 * If the matching event matches with an excluder,
		 * return 'does not match'
		 */
		cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
			int count;

			for (count = 0; count < excluder->excluder.count; count++) {
				int len;
				char *excluder_name;

				excluder_name = (char *) (excluder->excluder.names)
						+ count * LTTNG_UST_SYM_NAME_LEN;
				len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
				if (len > 0 && strutils_star_glob_match(excluder_name, len, desc->name, SIZE_MAX))
					return 0;
			}
		}
		return 1;
	}
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}

static
int lttng_event_match_enabler(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	if (lttng_desc_match_enabler(event->desc, enabler)
			&& event->chan == enabler->chan)
		return 1;
	else
		return 0;
}

static
struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_enabler_ref *enabler_ref;

	cds_list_for_each_entry(enabler_ref,
			&event->enablers_ref_head, node) {
		if (enabler_ref->ref == enabler)
			return enabler_ref;
	}
	return NULL;
}

/*
 * Create struct lttng_event if it is missing and present in the list of
 * tracepoint probes.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan) {
					found = 1;
					break;
				}
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}

/*
 * Iterate over all the UST sessions to unregister and destroy all probes from
 * the probe provider descriptor received as argument. Must be called with the
 * ust_lock held.
 */
void lttng_probe_provider_unregister_events(struct lttng_probe_desc *provider_desc)
{
	struct cds_hlist_node *node, *tmp_node;
	struct cds_list_head *sessionsp;
	struct lttng_session *session;
	struct cds_hlist_head *head;
	struct lttng_event *event;
	unsigned int i, j;

	/* Get handle on list of sessions. */
	sessionsp = _lttng_get_sessions();

	/*
	 * Iterate over all events in the probe provider descriptions and sessions
	 * to queue the unregistration of the events.
	 */
	for (i = 0; i < provider_desc->nr_events; i++) {
		const struct lttng_event_desc *event_desc;
		const char *event_name;
		size_t name_len;
		uint32_t hash;

		event_desc = provider_desc->event_desc[i];
		event_name = event_desc->name;
		name_len = strlen(event_name);
		hash = jhash(event_name, name_len, 0);

		/* Iterate over all sessions to find the current event description. */
		cds_list_for_each_entry(session, sessionsp, node) {
			/*
			 * Get the list of events in the hashtable bucket and iterate to
			 * find the event matching this descriptor.
			 */
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event_desc == event->desc) {
					/* Queue the unregistration of this event. */
					_lttng_event_unregister(event);
					break;
				}
			}
		}
	}

	/* Wait for grace period. */
	synchronize_trace();
	/* Prune the unregistration queue. */
	__tracepoint_probe_prune_release_queue();

	/*
	 * It is now safe to destroy the events and remove them from the event list
	 * and hashtables.
	 */
	for (i = 0; i < provider_desc->nr_events; i++) {
		const struct lttng_event_desc *event_desc;
		const char *event_name;
		size_t name_len;
		uint32_t hash;

		event_desc = provider_desc->event_desc[i];
		event_name = event_desc->name;
		name_len = strlen(event_name);
		hash = jhash(event_name, name_len, 0);

		/* Iterate over all sessions to find the current event description. */
		cds_list_for_each_entry(session, sessionsp, node) {
			/*
			 * Get the list of events in the hashtable bucket and iterate to
			 * find the event matching this descriptor.
			 */
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) {
				if (event_desc == event->desc) {
					/* Destroy enums of the current event. */
					for (j = 0; j < event->desc->nr_fields; j++) {
						const struct lttng_enum_desc *enum_desc;
						const struct lttng_event_field *field;
						struct lttng_enum *curr_enum;

						field = &(event->desc->fields[j]);
						if (field->type.atype != atype_enum) {
							continue;
						}

						enum_desc = field->type.u.basic.enumeration.desc;
						curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
						if (curr_enum) {
							_lttng_enum_destroy(curr_enum);
						}
					}

					/* Destroy event. */
					_lttng_event_destroy(event);
					break;
				}
			}
		}
	}
}

/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
					&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}

/*
 * Called at library load: connect the probe on all enablers matching
 * this event.
 * Called with session mutex held.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	cds_list_for_each_entry(session, &sessions, node) {
		lttng_session_lazy_sync_enablers(session);
	}
	return 0;
}

/*
 * For each session of the owner thread, execute pending statedump.
 * Only dump state for the sessions owned by the caller thread, because
 * we don't keep ust_lock across the entire iteration.
 */
void lttng_handle_pending_statedump(void *owner)
{
	struct lttng_session *session;

	/* Execute state dump */
	do_lttng_ust_statedump(owner);

	/* Clear pending state dump */
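	/*
	 * ust_lock() is expected to return nonzero when the process should
	 * quit; in that case skip clearing the pending flags, but still
	 * release the lock through the common exit path.
	 */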
	if (ust_lock()) {
		goto end;
	}
	cds_list_for_each_entry(session, &sessions, node) {
		if (session->owner != owner)
			continue;
		if (!session->statedump_pending)
			continue;
		session->statedump_pending = 0;
	}
end:
	ust_unlock();
	return;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	/* Remove from event list. */
	cds_list_del(&event->node);
	/* Remove from event hash table. */
	cds_hlist_del(&event->hlist);

	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}

static
void _lttng_enum_destroy(struct lttng_enum *_enum)
{
	cds_list_del(&_enum->node);
	cds_hlist_del(&_enum->hlist);
	free(_enum);
}

void lttng_ust_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
		lttng_session_destroy(session);
}

/*
 * Enabler management.
 */
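/*
 * An enabler is created disabled: it is linked into its session's
 * enabler list and only takes effect once lttng_enabler_enable() is
 * called and the enablers are (lazily) synced with the session.
 */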
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	enabler->enabled = 0;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}

int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_attach_context(struct lttng_ust_context *context_param,
		union ust_args *uargs,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
	{
		struct lttng_ust_perf_counter_ctx *perf_ctx_param;

		perf_ctx_param = &context_param->u.perf_counter;
		return lttng_add_perf_counter_to_ctx(
			perf_ctx_param->type,
			perf_ctx_param->config,
			perf_ctx_param->name,
			ctx);
	}
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_CPU_ID:
		return lttng_add_cpu_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_APP_CONTEXT:
		return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
			ctx);
	case LTTNG_UST_CONTEXT_CGROUP_NS:
		return lttng_add_cgroup_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IPC_NS:
		return lttng_add_ipc_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_MNT_NS:
		return lttng_add_mnt_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_NET_NS:
		return lttng_add_net_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PID_NS:
		return lttng_add_pid_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_USER_NS:
		return lttng_add_user_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_UTS_NS:
		return lttng_add_uts_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VUID:
		return lttng_add_vuid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VEUID:
		return lttng_add_veuid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VSUID:
		return lttng_add_vsuid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VGID:
		return lttng_add_vgid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VEGID:
		return lttng_add_vegid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VSGID:
		return lttng_add_vsgid_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}

int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0	// disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}

static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
	struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;

	/* Destroy filter bytecode */
	cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		free(filter_node);
	}

	/* Destroy excluders */
	cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
			&enabler->excluder_head, node) {
		free(excluder_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	cds_list_del(&enabler->node);
	free(enabler);
}

/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check if has enablers without bytecode enabled */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
	__tracepoint_probe_prune_release_queue();
}

/*
 * Apply enablers to session events, adding events to session if need
 * be. It is required after each modification applied to an active
 * session, and right before session "start".
 * "lazy" sync means we only sync if required.
 */
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session)
{
	/* We can skip if session is not active */
	if (!session->active)
		return;
	lttng_session_sync_enablers(session);
}

/*
 * Update all sessions with the given app context.
 * Called with ust lock held.
 * This is invoked when an application context gets loaded/unloaded. It
 * ensures the context callbacks are in sync with the application
 * context (either app context callbacks, or dummy callbacks).
 */
void lttng_ust_context_set_session_provider(const char *name,
		size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
		void (*record)(struct lttng_ctx_field *field,
			struct lttng_ust_lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan),
		void (*get_value)(struct lttng_ctx_field *field,
			struct lttng_ctx_value *value))
{
	struct lttng_session *session;

	cds_list_for_each_entry(session, &sessions, node) {
		struct lttng_channel *chan;
		struct lttng_event *event;
		int ret;

		ret = lttng_ust_context_set_provider_rcu(&session->ctx,
				name, get_size, record, get_value);
		if (ret)
			abort();
		cds_list_for_each_entry(chan, &session->chan_head, node) {
			ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
					name, get_size, record, get_value);
			if (ret)
				abort();
		}
		cds_list_for_each_entry(event, &session->events_head, node) {
			ret = lttng_ust_context_set_provider_rcu(&event->ctx,
					name, get_size, record, get_value);
			if (ret)
				abort();
		}
	}
}