/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define _GNU_SOURCE
#include <assert.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>

#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>

#include "consumer.h"

struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};

enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;
};

/*
 * Flag to inform the polling thread to quit when all fd hung up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds have hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
volatile int consumer_quit;

/*
 * Global hash table containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;

/*
 * Notify a thread pipe to poll back again. This usually means that some global
 * state has changed so we just send back the thread in a poll wait call.
 */
static void notify_thread_pipe(int wpipe)
{
	int ret;

	do {
		struct lttng_consumer_stream *null_stream = NULL;

		ret = write(wpipe, &null_stream, sizeof(null_stream));
	} while (ret < 0 && errno == EINTR);
}
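
/*
 * Note on notify_thread_pipe(): the NULL stream pointer written above is only
 * a wakeup token. The polling threads treat a NULL stream read from the pipe
 * as "global state changed, re-evaluate" (see consumer_thread_data_poll and
 * consumer_thread_metadata_poll), and the do/while simply retries the write
 * when interrupted by a signal (EINTR).
 */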

static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	int ret;

	msg.action = action;
	msg.chan = chan;
	do {
		ret = write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	} while (ret < 0 && errno == EINTR);
}

static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	int ret;

	do {
		ret = read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	} while (ret < 0 && errno == EINTR);
	if (ret > 0) {
		*action = msg.action;
		*chan = msg.chan;
	}
	return ret;
}
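
/*
 * notify_channel_pipe() and read_channel_pipe() exchange a fixed-size
 * struct consumer_channel_msg. Since sizeof(msg) is far below PIPE_BUF,
 * POSIX guarantees each write to the pipe is atomic, so messages cannot
 * interleave even if several writers race on the same pipe.
 */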

/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	assert(ht);

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	rcu_read_lock();

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	rcu_read_unlock();

	return stream;
}

static void steal_stream_key(int key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = -1ULL;
	}
	rcu_read_unlock();
}
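
/*
 * After the key is stolen (set to -1ULL), lookups for the old key miss this
 * node, but the node itself stays in the table so concurrent RCU readers
 * iterating over the hash table still traverse it safely.
 */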

/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}

static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	free(stream);
}

static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}

/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call RCU since we don't want the
	 * socket fds to be reassigned thus potentially creating bad state of the
	 * relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing to this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
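
/*
 * The free_*_rcu() callbacks above are deferred reclamation: call_rcu()
 * schedules them to run only after a grace period, when no RCU reader can
 * still hold a pointer obtained from the hash table, so closing the sockets
 * and freeing the memory cannot race with a lookup.
 */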

/*
 * Destroy and free relayd socket pair object.
 *
 * This function MUST be called with the consumer_data lock acquired.
 */
static void destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}

/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}

/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}

/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(int net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %d", net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}

/*
 * Cleanup a relayd object by flagging every associated streams for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	int netidx;

	assert(relayd);

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_pipe(ctx->consumer_data_pipe[1]);
		notify_thread_pipe(ctx->consumer_metadata_pipe[1]);
	}
}

/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		destroy_relayd(relayd);
	}
}

/*
 * Remove a stream from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	DBG("Consumer del stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	assert(consumer_data.stream_count > 0);
	consumer_data.stream_count--;

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id,
				stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		free_chan = stream->chan;
	}

end:
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	rcu_read_lock();
	call_rcu(&stream->node.head, free_stream_rcu);
	rcu_read_unlock();
}

struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	rcu_read_lock();

	stream->key = stream_key;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->state = state;
	stream->uid = uid;
	stream->gid = gid;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}

/*
 * Add a stream to the global list protected by a mutex.
 */
static int add_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/* Check and cleanup relayd */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_inc(&relayd->refcount);
	}

	/* Update channel refcount once added without error(s). */
	uatomic_inc(&stream->chan->refcount);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		cmm_smp_wmb();
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
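
/*
 * Lock ordering note: consumer_data.lock is always acquired before
 * stream->lock, both here in add_stream() and in the deletion paths
 * (consumer_del_stream, consumer_del_metadata_stream), which avoids
 * lock-order inversions between concurrent add and delete operations.
 */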

/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}

/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		int net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* Negative net sequence index is a failure */
	if (net_seq_idx < 0) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}

/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}

/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Safety net */
	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
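
/*
 * All multi-byte fields of the relayd data header are converted to big
 * endian (htobe32/htobe64) before being sent, so the wire format stays
 * independent of the host byte order on either side of the connection.
 */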

/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->uid = uid;
	channel->gid = gid;
	channel->relayd_id = relayd_id;
	channel->output = output;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}

/*
 * Add a channel to the global list protected by a mutex.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht, &channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
			channel->key);
		ret = -EEXIST;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->metadata_stream == NULL) {
		notify_channel_pipe(ctx, channel, CONSUMER_CHANNEL_ADD);
	}
	return ret;
}

/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(ctx);
	assert(ht);
	assert(pollfd);
	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers the debug output way too much. Uncomment it if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = ctx->consumer_data_pipe[0];
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}

/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error and
 * the caller should exit; return 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}

/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}

/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}

/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	int ret;

	consumer_quit = 1;
	do {
		ret = write(ctx->consumer_should_quit[1], "4", 1);
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}

void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}

/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_data_pipe);
	if (ret < 0) {
		PERROR("Error creating poll pipe");
		goto error_poll_pipe;
	}

	/* set read end of the pipe to non-blocking */
	ret = fcntl(ctx->consumer_data_pipe[0], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto error_poll_fcntl;
	}

	/* set write end of the pipe to non-blocking */
	ret = fcntl(ctx->consumer_data_pipe[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto error_poll_fcntl;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_metadata_pipe);
	if (ret < 0) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	utils_close_pipe(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_poll_fcntl:
error_quit_pipe:
	utils_close_pipe(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
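
/*
 * The error labels above unwind in the reverse order of resource creation:
 * each label closes only the pipes that were successfully created before the
 * failure, falling through to the next label and finally freeing the context.
 */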

/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	utils_close_pipe(ctx->consumer_data_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}

/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	int ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	do {
		ret = write(fd, (void *) &hdr, sizeof(hdr));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the perror
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return ret;
}

/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be done if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	unsigned long mmap_offset;
	char *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != -1) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			written = -1;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
	if (ret != 0) {
		errno = -ret;
		PERROR("tracer ctl get_mmap_read_offset");
		written = ret;
		goto end;
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
				if (ret < 0) {
					written = ret;
					/* Socket operation failed. We consider the relayd dead */
					if (ret == -EPIPE || ret == -EINVAL) {
						relayd_hang_up = 1;
						goto write_error;
					}
					goto end;
				}
			}
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EPIPE || ret == -EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			/* Else, use the default set before which is the filesystem. */
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current));
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd = ret;
			/* Reset current size because we just performed a rotation. */
			stream->tracefile_size_current = 0;
		}
		stream->tracefile_size_current += len;
	}

	while (len > 0) {
		do {
			ret = write(outfd, mmap_base + mmap_offset, len);
		} while (ret < 0 && errno == EINTR);
		DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
		if (ret < 0) {
			/*
			 * This is possible if the fd is closed on the other side (outfd)
			 * or any write problem. It can be verbose a bit for a normal
			 * execution if for instance the relayd is stopped abruptly. This
			 * can happen so set this to a DBG statement.
			 */
			DBG("Error in file write mmap");
			if (written == 0) {
				written = ret;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EPIPE || errno == EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		} else if (ret > len) {
			PERROR("Error in file write (ret %zd > len %lu)", ret, len);
			written += ret;
			goto end;
		} else {
			len -= ret;
			mmap_offset += ret;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret;
		}
		written += ret;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
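
/*
 * The write loop above copes with short writes: on a partial write(), len is
 * decremented and mmap_offset advanced so the remainder of the sub-buffer is
 * written on the next iteration, while written accumulates the total.
 */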

/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != -1) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current));
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd = ret;
			/* Reset current size because we just performed a rotation. */
			stream->tracefile_size_current = 0;
		}
		stream->tracefile_size_current += len;
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in relay splice");
			if (written == 0) {
				written = ret_splice;
			}
			ret = errno;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd) {
			if (stream->metadata_flag) {
				size_t metadata_payload_size =
					sizeof(struct lttcomm_relayd_metadata_payload);

				/* Update counter to fit the spliced data */
				ret_splice += metadata_payload_size;
				len += metadata_payload_size;
				/*
				 * We do this so the return value can match the len passed as
				 * argument to this function.
				 */
				written -= metadata_payload_size;
			}
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in file splice");
			if (written == 0) {
				written = ret_splice;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			ret = errno;
			goto splice_error;
		} else if (ret_splice > len) {
			errno = EINVAL;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			ret = errno;
			goto splice_error;
		}
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

	ret = ret_splice;

	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
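
/*
 * splice() requires one end of the transfer to be a pipe, hence the
 * two-stage copy above: ring buffer fd -> pipe, then pipe -> output fd (file
 * or relayd socket). The payload moves between file descriptors without ever
 * being copied into user space.
 */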

/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}

/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * WARNING: *MUST* be used with data stream only.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * XXX: Should not be only for metadata stream or else use another name.
 */
static void destroy_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}

void lttng_consumer_close_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}

/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap metadata stream");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

end:
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	rcu_read_lock();
	call_rcu(&stream->node.head, free_stream_rcu);
	rcu_read_unlock();
}

/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
static int add_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/* Find relayd and, if one is found, increment refcount. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_inc(&relayd->refcount);
	}

	/* Update channel refcount once added without error(s). */
	uatomic_inc(&stream->chan->refcount);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		cmm_smp_wmb();
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
		&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}

/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}

/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}

/*
 * Thread that polls on the metadata file descriptors and writes the metadata
 * on disk or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_metadata_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_metadata_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_metadata_pipe[0]);
					ret = close(ctx->consumer_metadata_pipe[0]);
					if (ret < 0) {
						PERROR("close metadata pipe");
					}
					continue;
				} else if (revents & LPOLLIN) {
					do {
						/* Get the stream pointer received */
						ret = read(pollfd, &stream, sizeof(stream));
					} while (ret < 0 && errno == EINTR);
					if (ret < 0 ||
							ret < sizeof(struct lttng_consumer_stream *)) {
						PERROR("read metadata stream");
						/*
						 * Let's continue here and hope we can still work
						 * without stopping the consumer. XXX: Should we?
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					ret = add_metadata_stream(stream, metadata_ht);
					if (ret) {
						ERR("Unable to add metadata stream");
						/* Stream was not setup properly. Continuing. */
						consumer_del_metadata_stream(stream, NULL);
						continue;
					}

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI);
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file descriptors
				 * and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				len = ctx->on_buffer_ready(stream, ctx);
				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				} else if (len > 0) {
					stream->data_read = 1;
				}
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

error:
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
	destroy_stream_ht(metadata_ht);
end_ht:
	rcu_unregister_thread();
	return NULL;
}
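
/*
 * The metadata pipe carries raw lttng_consumer_stream pointers (read with
 * sizeof(stream) above). This is only safe because both ends of the pipe
 * belong to the same process, so the pointer values remain valid in the
 * reading thread's address space.
 */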
/*
 * This thread polls the fds in the set to consume the data and write it to
 * the trace file if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (data_ht == NULL) {
		/* ENOMEM at this point. Better to bail out. */
		goto end;
	}

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* allocate for all fds + 1 for the consumer_data_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_data_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs left and consumer_quit is set: clean up the thread. */
		if (nb_fd == 0 && consumer_quit == 1) {
			goto end;
		}

		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered the poll, go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			/* Consume 1 byte of pipe data */
			do {
				pipe_readlen = read(ctx->consumer_data_pipe[0], &new_stream,
						sizeof(new_stream));
			} while (pipe_readlen == -1 && errno == EINTR);
			if (pipe_readlen < 0) {
				PERROR("read consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			ret = add_stream(new_stream, data_ht);
			if (ret) {
				ERR("Consumer add stream %" PRIu64 " failed. Continuing",
						new_stream->key);
				/*
				 * At this point, if the add_stream fails, it is not in the
				 * hash table thus passing the NULL value here.
				 */
				consumer_del_stream(new_stream, NULL);
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	ret = close(ctx->consumer_metadata_pipe[1]);
	if (ret < 0) {
		PERROR("close data pipe");
	}

	destroy_data_stream_ht(data_ht);

	rcu_unregister_thread();
	return NULL;
}
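/*
 * Minimal sketch of the half-close idiom relied on above (illustrative,
 * plain POSIX, not part of the original source): once the write side of a
 * pipe is closed, poll() on the still-open read side reports POLLHUP, so
 * the watching thread wakes up and can close its own end.
 *
 *	int p[2];
 *	struct pollfd pfd;
 *
 *	pipe(p);
 *	close(p[1]);		// close the write side only
 *	pfd.fd = p[0];
 *	pfd.events = POLLIN;
 *	poll(&pfd, 1, -1);	// returns; pfd.revents contains POLLHUP
 *	close(p[0]);		// the reader closes the read side itself
 */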
/*
 * Close the wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Note: a mutex is taken internally within
			 * liblttng-ust-ctl to protect timer wakeup_fd
			 * use from concurrent close.
			 */
			lttng_ustconsumer_close_stream_wakeup(stream);
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
		/* Only the channel pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			goto end;
		}

restart:
		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}
			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;

					ret = read_channel_pipe(ctx, &chan, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
	DBG("Channel poll thread exiting");
	rcu_unregister_thread();
	return NULL;
}
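/*
 * Hand-off sketch (illustrative assumption): other threads feed this poll
 * loop through the channel pipe, e.g. once a channel's streams exist:
 *
 *	notify_channel_pipe(ctx, chan, CONSUMER_CHANNEL_ADD);
 *
 * and consumer_thread_sessiond_poll() below uses a NULL channel with
 * CONSUMER_CHANNEL_QUIT to ask this loop to tear down.
 */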
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	if (lttng_consumer_poll_socket(sockpoll) < 0) {
		ret = -1;
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * avoids blocking on the sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto end;
	}

	/* Prepare the FDs to poll: the client socket and the should_quit pipe. */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto end;
	}

	/*
	 * Set up the metadata socket, which is the second socket connection on
	 * the command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received command on sock");
	}
end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * exited.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set, so as to quit gracefully.
	 */
	notify_thread_pipe(ctx->consumer_data_pipe[1]);

	notify_channel_pipe(ctx, NULL, CONSUMER_CHANNEL_QUIT);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

	rcu_unregister_thread();
	return NULL;
}
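/*
 * Counterpart sketch on the session daemon side (assumption, not in this
 * file): it connects twice to the command socket path, matching the two
 * accepts above (command socket first, metadata socket second), then sends
 * commands on the first connection. Roughly, assuming the usual lttcomm
 * helpers:
 *
 *	int cmd_sock = lttcomm_connect_unix_sock(sock_path);
 *	int metadata_sock = lttcomm_connect_unix_sock(sock_path);
 *	// then send struct lttcomm_consumer_msg commands on cmd_sock
 */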
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	pthread_mutex_unlock(&stream->lock);
	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
void lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
}
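/*
 * Minimal usage sketch for these u64-keyed tables, using only helpers seen
 * in this file (the key value is a made-up example):
 *
 *	struct lttng_ht_iter iter;
 *	struct lttng_ht_node_u64 *node;
 *	uint64_t key = 42;
 *
 *	rcu_read_lock();
 *	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
 *	node = lttng_ht_iter_get_node_u64(&iter);
 *	if (node) {
 *		// entry found; caa_container_of() yields the channel
 *	}
 *	rcu_read_unlock();
 */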
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, unsigned int sessiond_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttng_error_code ret_code = LTTNG_OK;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %d)", net_seq_idx);

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		goto error;
	}

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			ret = -1;
			goto error;
		}
		relayd->sessiond_session_id = (uint64_t) sessiond_id;
		relayd_created = 1;
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		ret = -EINTR;
		goto error;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */
		goto error;
	}

	/* We have the fds without error. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Immediately try to close the created socket if valid. */
		if (relayd->control_sock.sock.fd >= 0) {
			if (close(relayd->control_sock.sock.fd)) {
				PERROR("close relayd control socket");
			}
		}
		/* Handle create_sock error. */
		if (ret < 0) {
			goto error;
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		/*
		 * Create a session on the relayd and store the returned id. Lock the
		 * control socket mutex if the relayd was NOT created before.
		 */
		if (!relayd_created) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		}
		ret = relayd_create_session(&relayd->control_sock,
				&relayd->relayd_session_id);
		if (!relayd_created) {
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		}
		if (ret < 0) {
			/*
			 * Close all sockets of a relayd object. It will be freed if it was
			 * created at the error code path or else it will be garbage
			 * collected.
			 */
			(void) relayd_close(&relayd->control_sock);
			(void) relayd_close(&relayd->data_sock);
			goto error;
		}

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Immediately try to close the created socket if valid. */
		if (relayd->data_sock.sock.fd >= 0) {
			if (close(relayd->data_sock.sock.fd)) {
				PERROR("close relayd data socket");
			}
		}
		/* Handle create_sock error. */
		if (ret < 0) {
			goto error;
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/*
	 * Add relayd socket pair to consumer data hashtable. If the object
	 * already exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
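/*
 * Sessiond-side sketch of the handshake above (assumption, not in this
 * file): receive the first status, pass the relayd fd over the unix
 * socket, then receive the second status. Roughly:
 *
 *	ret = lttcomm_recv_unix_sock(sock, &reply, sizeof(reply));	// status #1
 *	ret = lttcomm_send_fds_unix_sock(sock, &relayd_fd, 1);		// SCM_RIGHTS fd
 *	ret = lttcomm_recv_unix_sock(sock, &reply, sizeof(reply));	// status #2
 */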
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned, else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
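/*
 * Usage sketch: callers treat a busy stream as "data still pending"
 * instead of blocking on it, as consumer_data_pending() does below:
 *
 *	if (!stream_try_lock(stream)) {
 *		goto data_pending;	// stream in use elsewhere
 *	}
 *	// inspect the stream...
 *	pthread_mutex_unlock(&stream->lock);
 */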
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A RCU read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here, whereas the relayd
		 * session id might not be unique when multiple relayd are used.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
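/*
 * Caller sketch showing the required RCU read-side critical section
 * (illustrative; the session_id value is hypothetical):
 *
 *	rcu_read_lock();
 *	relayd = find_relayd_by_session_id(session_id);
 *	if (relayd) {
 *		// use relayd; it stays valid until rcu_read_unlock()
 *	}
 *	rcu_read_unlock();
 */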
/*
 * Check if for a given session id there is still data needed to be extracted.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd, so report no data pending. */
			goto data_not_pending;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}

	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
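/*
 * Usage sketch (illustrative assumption): a stop-tracing path on the
 * session daemon side would poll this check until the consumer reports
 * nothing pending before letting viewers read the trace files:
 *
 *	while (consumer_data_pending(session_id)) {
 *		usleep(DATA_PENDING_RETRY_US);	// hypothetical retry delay
 *	}
 */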
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = -LTTNG_ERR_UST_CHAN_FAIL;
	} else {
		msg.ret_code = LTTNG_OK;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}