Fix: baddr_statedump deadlock with JUL tracing
[deliverable/lttng-ust.git] / liblttng-ust / lttng-events.c
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _GNU_SOURCE
24 #include <stdio.h>
25 #include <urcu/list.h>
26 #include <urcu/hlist.h>
27 #include <pthread.h>
28 #include <errno.h>
29 #include <sys/shm.h>
30 #include <sys/ipc.h>
31 #include <stdint.h>
32 #include <stddef.h>
33 #include <inttypes.h>
34 #include <time.h>
35 #include <lttng/ust-endian.h>
36 #include "clock.h"
37
38 #include <urcu-bp.h>
39 #include <urcu/compiler.h>
40 #include <urcu/uatomic.h>
41 #include <urcu/arch.h>
42
43 #include <lttng/tracepoint.h>
44 #include <lttng/ust-events.h>
45
46 #include <usterr-signal-safe.h>
47 #include <helper.h>
48 #include <lttng/ust-ctl.h>
49 #include <ust-comm.h>
50 #include "error.h"
51 #include "compat.h"
52 #include "lttng-ust-uuid.h"
53
54 #include "tracepoint-internal.h"
55 #include "lttng-tracer.h"
56 #include "lttng-tracer-core.h"
57 #include "wait.h"
58 #include "../libringbuffer/shm.h"
59 #include "jhash.h"
60
61 /*
62 * The sessions mutex is the centralized mutex across UST tracing
63 * control and probe registration. All operations within this file are
64 * called by the communication thread, under ust_lock protection.
65 */
66 static pthread_mutex_t sessions_mutex = PTHREAD_MUTEX_INITIALIZER;
67
/*
 * Acquire the global UST sessions mutex, serializing all tracing
 * control and probe registration (see sessions_mutex above).
 */
void ust_lock(void)
{
	pthread_mutex_lock(&sessions_mutex);
}
72
/*
 * Release the global UST sessions mutex taken by ust_lock().
 */
void ust_unlock(void)
{
	pthread_mutex_unlock(&sessions_mutex);
}
77
78 static CDS_LIST_HEAD(sessions);
79
80 static void _lttng_event_destroy(struct lttng_event *event);
81
82 static
83 void lttng_session_lazy_sync_enablers(struct lttng_session *session);
84 static
85 void lttng_session_sync_enablers(struct lttng_session *session);
86 static
87 void lttng_enabler_destroy(struct lttng_enabler *enabler);
88
89 /*
90 * Called with ust lock held.
91 */
92 int lttng_session_active(void)
93 {
94 struct lttng_session *iter;
95
96 cds_list_for_each_entry(iter, &sessions, node) {
97 if (iter->active)
98 return 1;
99 }
100 return 0;
101 }
102
103 static
104 int lttng_loglevel_match(int loglevel,
105 unsigned int has_loglevel,
106 enum lttng_ust_loglevel_type req_type,
107 int req_loglevel)
108 {
109 if (req_type == LTTNG_UST_LOGLEVEL_ALL)
110 return 1;
111 if (!has_loglevel)
112 loglevel = TRACE_DEFAULT;
113 switch (req_type) {
114 case LTTNG_UST_LOGLEVEL_RANGE:
115 if (loglevel <= req_loglevel || req_loglevel == -1)
116 return 1;
117 else
118 return 0;
119 case LTTNG_UST_LOGLEVEL_SINGLE:
120 if (loglevel == req_loglevel || req_loglevel == -1)
121 return 1;
122 else
123 return 0;
124 case LTTNG_UST_LOGLEVEL_ALL:
125 default:
126 return 1;
127 }
128 }
129
/*
 * Wait for an RCU grace period, guaranteeing that all in-flight probe
 * callbacks (RCU read-side critical sections) have completed.
 */
void synchronize_trace(void)
{
	synchronize_rcu();
}
134
135 struct lttng_session *lttng_session_create(void)
136 {
137 struct lttng_session *session;
138 int i;
139
140 session = zmalloc(sizeof(struct lttng_session));
141 if (!session)
142 return NULL;
143 CDS_INIT_LIST_HEAD(&session->chan_head);
144 CDS_INIT_LIST_HEAD(&session->events_head);
145 CDS_INIT_LIST_HEAD(&session->enablers_head);
146 for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
147 CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
148 cds_list_add(&session->node, &sessions);
149 return session;
150 }
151
152 /*
153 * Only used internally at session destruction.
154 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	/* Unlink from the session channel list and free its contexts. */
	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle.
	 */
	channel_destroy(chan, handle, 0);
}
171
172 static
173 void register_event(struct lttng_event *event)
174 {
175 int ret;
176 const struct lttng_event_desc *desc;
177
178 assert(event->registered == 0);
179 desc = event->desc;
180 ret = __tracepoint_probe_register(desc->name,
181 desc->probe_callback,
182 event, desc->signature);
183 WARN_ON_ONCE(ret);
184 if (!ret)
185 event->registered = 1;
186 }
187
188 static
189 void unregister_event(struct lttng_event *event)
190 {
191 int ret;
192 const struct lttng_event_desc *desc;
193
194 assert(event->registered == 1);
195 desc = event->desc;
196 ret = __tracepoint_probe_unregister(desc->name,
197 desc->probe_callback,
198 event);
199 WARN_ON_ONCE(ret);
200 if (!ret)
201 event->registered = 0;
202 }
203
204 /*
205 * Only used internally at session destruction.
206 */
static
void _lttng_event_unregister(struct lttng_event *event)
{
	/* Unregister only if the probe is currently connected. */
	if (event->registered)
		unregister_event(event);
}
213
/*
 * Tear down a session: mark it inactive, disconnect all probes, wait
 * for in-flight probe callbacks to finish, then free enablers, events
 * and channels, in that order. Called with ust lock held.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enabler *enabler, *tmpenabler;

	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace(); /* Wait for in-flight events to complete */
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	/* Unlink from the global session list before freeing. */
	cds_list_del(&session->node);
	free(session);
}
236
237 int lttng_session_enable(struct lttng_session *session)
238 {
239 int ret = 0;
240 struct lttng_channel *chan;
241 int notify_socket;
242
243 if (session->active) {
244 ret = -EBUSY;
245 goto end;
246 }
247
248 notify_socket = lttng_get_notify_socket(session->owner);
249 if (notify_socket < 0)
250 return notify_socket;
251
252 /* Set transient enabler state to "enabled" */
253 session->tstate = 1;
254 /* We need to sync enablers with session before activation. */
255 lttng_session_sync_enablers(session);
256
257 /*
258 * Snapshot the number of events per channel to know the type of header
259 * we need to use.
260 */
261 cds_list_for_each_entry(chan, &session->chan_head, node) {
262 const struct lttng_ctx *ctx;
263 const struct lttng_ctx_field *fields = NULL;
264 size_t nr_fields = 0;
265 uint32_t chan_id;
266
267 /* don't change it if session stop/restart */
268 if (chan->header_type)
269 continue;
270 ctx = chan->ctx;
271 if (ctx) {
272 nr_fields = ctx->nr_fields;
273 fields = ctx->fields;
274 }
275 ret = ustcomm_register_channel(notify_socket,
276 session->objd,
277 chan->objd,
278 nr_fields,
279 fields,
280 &chan_id,
281 &chan->header_type);
282 if (ret) {
283 DBG("Error (%d) registering channel to sessiond", ret);
284 return ret;
285 }
286 if (chan_id != chan->id) {
287 DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
288 chan_id, chan->id);
289 return -EINVAL;
290 }
291 }
292
293 /* Set atomically the state to "active" */
294 CMM_ACCESS_ONCE(session->active) = 1;
295 CMM_ACCESS_ONCE(session->been_active) = 1;
296
297 session->statedump_pending = 1;
298 lttng_ust_sockinfo_session_enabled(session->owner);
299 end:
300 return ret;
301 }
302
303 int lttng_session_disable(struct lttng_session *session)
304 {
305 int ret = 0;
306
307 if (!session->active) {
308 ret = -EBUSY;
309 goto end;
310 }
311 /* Set atomically the state to "inactive" */
312 CMM_ACCESS_ONCE(session->active) = 0;
313
314 /* Set transient enabler state to "disabled" */
315 session->tstate = 0;
316 lttng_session_sync_enablers(session);
317 end:
318 return ret;
319 }
320
321 int lttng_channel_enable(struct lttng_channel *channel)
322 {
323 int ret = 0;
324
325 if (channel->enabled) {
326 ret = -EBUSY;
327 goto end;
328 }
329 /* Set transient enabler state to "enabled" */
330 channel->tstate = 1;
331 lttng_session_sync_enablers(channel->session);
332 /* Set atomically the state to "enabled" */
333 CMM_ACCESS_ONCE(channel->enabled) = 1;
334 end:
335 return ret;
336 }
337
/*
 * Disable a channel.
 * Returns 0 on success, -EBUSY if the channel is already disabled.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}
354
355 /*
356 * Supports event creation while tracing session is active.
357 */
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

	/* Refuse duplicate (name, channel) pairs: lookup in the hash table. */
	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
	cds_hlist_for_each_entry(event, node, head, hlist) {
		assert(event->desc);
		if (!strncmp(event->desc->name, desc->name,
					LTTNG_UST_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	/*
	 * Check if loglevel match. Refuse to connect event if not.
	 */
	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	/* Default loglevel/URI when the descriptor does not provide them. */
	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
			session->objd,
			chan->objd,
			event_name,
			loglevel,
			desc->signature,
			desc->nr_fields,
			desc->fields,
			uri,
			&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	/* Populate lttng_event structure before tracepoint registration. */
	cmm_smp_wmb();
	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

	/* goto-based cleanup: unwind only what was acquired. */
sessiond_register_error:
	free(event);
cache_error:
socket_error:
exist:
	return ret;
}
446
static
int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_WILDCARD);
	/*
	 * Compare excluding final '*'.
	 * NOTE(review): assumes the enabler name is non-empty and ends
	 * with '*'; an empty name would make the strlen() - 1 length
	 * underflow -- TODO confirm callers guarantee this.
	 */
	if (strncmp(desc->name, enabler->event_param.name,
			strlen(enabler->event_param.name) - 1))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}
470
471 static
472 int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
473 struct lttng_enabler *enabler)
474 {
475 int loglevel = 0;
476 unsigned int has_loglevel = 0;
477
478 assert(enabler->type == LTTNG_ENABLER_EVENT);
479 if (strcmp(desc->name, enabler->event_param.name))
480 return 0;
481 if (desc->loglevel) {
482 loglevel = *(*desc->loglevel);
483 has_loglevel = 1;
484 }
485 if (!lttng_loglevel_match(loglevel,
486 has_loglevel,
487 enabler->event_param.loglevel_type,
488 enabler->event_param.loglevel))
489 return 0;
490 return 1;
491 }
492
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	struct lttng_ust_excluder_node *excluder;

	/* If event matches with an excluder, return 'does not match' */
	cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
		int count;

		for (count = 0; count < excluder->excluder.count; count++) {
			int found, len;
			char *excluder_name;

			/* Exclusion names are packed in fixed-size slots. */
			excluder_name = (char *) (excluder->excluder.names)
					+ count * LTTNG_UST_SYM_NAME_LEN;
			len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
			/* A trailing '*' makes the excluder a prefix match. */
			if (len > 0 && excluder_name[len - 1] == '*') {
				found = !strncmp(desc->name, excluder_name,
						len - 1);
			} else {
				found = !strncmp(desc->name, excluder_name,
						LTTNG_UST_SYM_NAME_LEN - 1);
			}
			if (found) {
				return 0;
			}
		}
	}
	/* Then dispatch on the enabler type for the positive match. */
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_desc_match_wildcard_enabler(desc, enabler);
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}
531
532 static
533 int lttng_event_match_enabler(struct lttng_event *event,
534 struct lttng_enabler *enabler)
535 {
536 if (lttng_desc_match_enabler(event->desc, enabler)
537 && event->chan == enabler->chan)
538 return 1;
539 else
540 return 0;
541 }
542
543 static
544 struct lttng_enabler_ref * lttng_event_enabler_ref(struct lttng_event *event,
545 struct lttng_enabler *enabler)
546 {
547 struct lttng_enabler_ref *enabler_ref;
548
549 cds_list_for_each_entry(enabler_ref,
550 &event->enablers_ref_head, node) {
551 if (enabler_ref->ref == enabler)
552 return enabler_ref;
553 }
554 return NULL;
555 }
556
557 /*
558 * Create struct lttng_event if it is missing and present in the list of
559 * tracepoint probes.
560 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created: lookup by (desc, channel)
			 * in the session event hash table.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				/* Creation failure is logged, not fatal. */
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}
618
619 /*
620 * Create events associated with an enabler (if not already present),
621 * and add backward reference from the event to the enabler.
622 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}
662
663 /*
664 * Called at library load: connect the probe on all enablers matching
665 * this event.
666 * Called with session mutex held.
667 */
668 int lttng_fix_pending_events(void)
669 {
670 struct lttng_session *session;
671
672 cds_list_for_each_entry(session, &sessions, node) {
673 lttng_session_lazy_sync_enablers(session);
674 }
675 return 0;
676 }
677
678 /*
679 * Called after session enable: For each session, execute pending statedumps.
680 */
int lttng_handle_pending_statedumps(t_statedump_func_ptr statedump_func_ptr)
{
	struct lttng_session *session;

	cds_list_for_each_entry(session, &sessions, node) {
		if (session->statedump_pending) {
			/* Clear the flag before invoking the callback. */
			session->statedump_pending = 0;
			statedump_func_ptr(session);
		}
	}
	return 0;
}
693
694 /*
695 * Only used internally at session destruction.
696 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	/* Unlink from the session event list, then free owned resources. */
	cds_list_del(&event->node);
	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}
711
/*
 * Destroy all remaining sessions at library teardown.
 * Safe iteration is required: lttng_session_destroy unlinks and frees
 * each node.
 */
void lttng_ust_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
		lttng_session_destroy(session);
}
719
720 /*
721 * Enabler management.
722 */
/*
 * Allocate an enabler of the given type for a channel, copy the event
 * matching parameters, link it into the session, and trigger a lazy
 * enabler sync. Returns NULL on allocation failure.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	enabler->enabled = 1;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}
744
/* Mark the enabler enabled and resync the owning session. */
int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
751
/* Mark the enabler disabled and resync the owning session. */
int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
758
/*
 * Append a filter bytecode node to the enabler and resync. The
 * enabler takes ownership of the node (freed in lttng_enabler_destroy).
 */
int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
767
/*
 * Append an exclusion node to the enabler and resync. The enabler
 * takes ownership of the node (freed in lttng_enabler_destroy).
 */
int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
776
/*
 * Attach a context field of the requested kind to *ctx.
 * Returns 0 on success, -EPERM if the session has ever been active,
 * -EINVAL for an unknown context type.
 */
int lttng_attach_context(struct lttng_ust_context *context_param,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}
802 }
803
/*
 * Per-enabler context attach: intentionally disabled (see #if 0
 * below); always reports -ENOSYS to callers.
 */
int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0	// disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}
819
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
	struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;

	/* Destroy filter bytecode */
	cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		free(filter_node);
	}

	/* Destroy excluders */
	cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
			&enabler->excluder_head, node) {
		free(excluder_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	/* Unlink from the session enabler list before freeing. */
	cds_list_del(&enabler->node);
	free(enabler);
}
844
845 /*
846 * lttng_session_sync_enablers should be called just before starting a
847 * session.
848 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	/* Create missing events and backward refs for every enabler. */
	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check if has enablers without bytecode enabled */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
}
914
915 /*
916 * Apply enablers to session events, adding events to session if need
917 * be. It is required after each modification applied to an active
918 * session, and right before session "start".
919 * "lazy" sync means we only sync if required.
920 */
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session)
{
	/*
	 * We can skip if session is not active: an inactive session is
	 * fully synced by lttng_session_enable before activation.
	 */
	if (!session->active)
		return;
	lttng_session_sync_enablers(session);
}
This page took 0.054646 seconds and 5 git commands to generate.