/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer-timer.h>

#include "consumer-stream.h"
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};
struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};
/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by the consumer_thread_receive_fds when it notices that all fds
 * have hung up. Also updated by the signal handler (consumer_should_exit()).
 * Read by the polling threads.
 */
volatile int consumer_quit;
/*
 * Global hash tables containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll thread
 * for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
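/*
 * Illustrative sketch, NOT part of the original file: how a poll thread
 * typically consumes the NULL-pointer token written above. The function name
 * is hypothetical; lttng_pipe_read() is the read-side counterpart of
 * lttng_pipe_write().
 */
#if 0	/* Example only; not compiled. */
static void example_drain_wakeup(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *stream;
	ssize_t ret;

	ret = lttng_pipe_read(pipe, &stream, sizeof(stream));
	if (ret == sizeof(stream) && stream == NULL) {
		/* NULL means "global state changed": rebuild the poll set. */
	}
}
#endif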
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	ssize_t ret;
	struct consumer_channel_msg msg;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	rcu_read_lock();

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	rcu_read_unlock();

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need to
		 * iterate on this stream when iterating over the hash table.
		 * Just change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}
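/*
 * Illustrative sketch, NOT part of the original file: why the key is
 * "stolen". If a new stream re-uses key 42 while a dying one still sits in
 * the table, the old node's key becomes -1ULL so lookups only ever match the
 * new stream, yet hash table iteration still visits the old node. The
 * new_stream variable is a hypothetical stand-in.
 */
#if 0	/* Example only; not compiled. */
steal_stream_key(42, data_ht);
lttng_ht_add_unique_u64(data_ht, &new_stream->node);
#endif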
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
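/*
 * Illustrative sketch, NOT part of the original file: a caller must wrap the
 * lookup in the RCU read-side lock and keep it held for as long as the
 * returned pointer is in use.
 */
#if 0	/* Example only; not compiled. */
rcu_read_lock();
channel = consumer_find_channel(key);
if (channel) {
	/* Safe to dereference while the read-side lock is held. */
}
rcu_read_unlock();
#endif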
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	free(stream);
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call_rcu callback since we don't
	 * want the socket fds to be reassigned, thus potentially creating a bad
	 * state of the relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
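/*
 * Illustrative sketch, NOT part of the original file: the deferred-free
 * pattern the *_rcu helpers above implement. Removal from the hash table
 * makes the node invisible to new readers; call_rcu() postpones the actual
 * free until all pre-existing RCU readers are done.
 */
#if 0	/* Example only; not compiled. */
ret = lttng_ht_del(consumer_data.relayd_ht, &iter);	/* unpublish */
call_rcu(&relayd->node.head, free_relayd_rcu);		/* reclaim later */
#endif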
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream, *stmp;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so
		 * we have a guarantee that this call will succeed.
		 */
		consumer_stream_destroy(stream, NULL);
	}

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked, which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in an RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state has changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in an RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * has changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
/*
 * Completely destroy a stream from every visible data structure and the given
 * hash table, if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}

/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	rcu_read_lock();

	stream->key = stream_key;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->uid = uid;
	stream->gid = gid;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_fd = -1;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;
	int ret = 0;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow adding redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}

void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
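/*
 * Illustrative sketch, NOT part of the original file: the lock hierarchy the
 * stream add/del paths above rely on. Acquiring in a fixed order
 * (consumer_data, then channel, then channel timer, then stream) is what
 * keeps the add and delete paths deadlock-free.
 */
#if 0	/* Example only; not compiled. */
pthread_mutex_lock(&consumer_data.lock);
pthread_mutex_lock(&stream->chan->lock);
pthread_mutex_lock(&stream->chan->timer_lock);
pthread_mutex_lock(&stream->lock);
/* ... mutate the hash tables ... */
pthread_mutex_unlock(&stream->lock);
pthread_mutex_unlock(&stream->chan->timer_lock);
pthread_mutex_unlock(&stream->chan->lock);
pthread_mutex_unlock(&consumer_data.lock);
#endif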
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
/*
 * Find a relayd and send the stream to it.
 *
 * Returns 0 on success, < 0 on error.
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	consumer_stream_relayd_close(stream, relayd);
	rcu_read_unlock();
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that will next_net_seq_num be
		 * incremented. This is why, when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
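/*
 * Illustrative sketch, NOT part of the original file: the off-by-one on
 * next_net_seq_num described above. After three packets, the counter went
 * 0 -> 1 -> 2 -> 3 while the last header carried htobe64(2), so a command
 * referring to the last *sent* packet must use next_net_seq_num - 1 (as
 * relayd_send_close_stream() does later in this file).
 */
#if 0	/* Example only; not compiled. */
uint64_t last_sent_seq = stream->next_net_seq_num - 1;
#endif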
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->uid = uid;
	channel->gid = gid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		assert(0);
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * On success 0 is returned else a negative value.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht, &channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
				channel->key);
		ret = -EEXIST;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}

	return ret;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(ctx);
	assert(ht);
	assert(pollfd);
	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error and
 * should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds. MUST be called when all instances
 * are destroyed, i.e. when all threads were joined and have ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);
	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	consumer_quit = 1;
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in the near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}
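/*
 * Illustrative sketch, NOT part of the original file: the page cache
 * discipline used above, assuming a plain Linux file descriptor. Writeout is
 * started asynchronously right after a write; one subbuffer later the pages
 * are waited on and dropped.
 */
#if 0	/* Example only; not compiled. */
/* Right after writing [offset, offset + len): start writeout, no wait. */
sync_file_range(outfd, offset, len, SYNC_FILE_RANGE_WRITE);

/* One subbuffer later: wait for writeback, then drop the clean pages. */
sync_file_range(outfd, offset, len,
		SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE
		| SYNC_FILE_RANGE_WAIT_AFTER);
posix_fadvise(outfd, offset, len, POSIX_FADV_DONTNEED);
#endif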
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible for doing the
 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
 * the buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
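/*
 * Illustrative sketch, NOT part of the original file: how a consumer binary
 * might wire its callbacks into lttng_consumer_create(). All example_* names
 * are hypothetical stand-ins for real callbacks such as the kernel
 * consumer's read-subbuffer function.
 */
#if 0	/* Example only; not compiled. */
static ssize_t example_buffer_ready(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	return 0;	/* consume one subbuffer here */
}

static int example_recv_channel(struct lttng_consumer_channel *channel) { return 0; }
static int example_recv_stream(struct lttng_consumer_stream *stream) { return 0; }
static int example_update_stream(uint64_t stream_key, uint32_t state) { return 0; }

struct lttng_consumer_local_data *ctx = lttng_consumer_create(
		LTTNG_CONSUMER_KERNEL, example_buffer_ready,
		example_recv_channel, example_recv_stream,
		example_update_stream);
#endif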
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * so as not to clobber the error output, since this can happen in a
		 * normal code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct lttng_packet_index *index)
{
	unsigned long mmap_offset;
	char *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check whether the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = -errno;
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			written = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = ret;
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
				if (ret < 0) {
					written = ret;
					/* Socket operation failed. We consider the relayd dead */
					if (ret == -EPIPE || ret == -EINVAL) {
						relayd_hang_up = 1;
						goto write_error;
					}
					goto end;
				}
			}
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EPIPE || ret == -EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			/* Else, use the default set before which is the filesystem. */
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just performed a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		if (index) {
			index->offset = htobe64(stream->out_fd_offset);
		}
	}

	while (len > 0) {
		ret = lttng_write(outfd, mmap_base + mmap_offset, len);
		DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
		if (ret < len) {
			/*
			 * This is possible if the fd is closed on the other side (outfd)
			 * or any write problem. It can be verbose a bit for a normal
			 * execution if for instance the relayd is stopped abruptly. This
			 * can happen so set this to a DBG statement.
			 */
			DBG("Error in file write mmap");
			if (written == 0) {
				written = -errno;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EPIPE || errno == EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		} else if (ret > len) {
			PERROR("Error in file write (ret %zd > len %lu)", ret, len);
			written += ret;
			goto end;
		} else {
			len -= ret;
			mmap_offset += ret;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret;
		}
		stream->output_written += ret;
		written += ret;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
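/*
 * Illustrative sketch, NOT part of the original file: the tracefile rotation
 * test above, with numbers. With tracefile_size = 1048576 and
 * tracefile_size_current = 1000000, a 65536-byte packet would overflow the
 * file, so it is rotated and the counters reset before the write.
 */
#if 0	/* Example only; not compiled. */
static int example_needs_rotation(uint64_t current_size, unsigned long len,
		uint64_t max_size)
{
	return max_size > 0 && (current_size + len) > max_size;
}
#endif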
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct lttng_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check whether the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the
			 * function since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just performed a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in relay splice");
			written = ret_splice;
			ret = errno;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in file splice");
			written = ret_splice;
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			ret = errno;
			goto splice_error;
		} else if (ret_splice > len) {
			errno = EINVAL;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			ret = errno;
			goto splice_error;
		}
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
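/*
 * Illustrative sketch, NOT part of the original file: the two-stage
 * zero-copy path used above. Data moves ring buffer -> pipe -> output fd
 * without ever being copied into user space.
 */
#if 0	/* Example only; not compiled. */
/* Stage 1: ring buffer fd into the pipe. */
ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
		SPLICE_F_MOVE | SPLICE_F_MORE);
/* Stage 2: pipe into the tracefile or relayd socket. */
ret_splice = splice(splice_pipe[0], NULL, outfd, NULL, ret_splice,
		SPLICE_F_MOVE | SPLICE_F_MORE);
#endif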
/*
 * Take a snapshot for a specific fd.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Get the produced position.
 *
 * Returns 0 on success, < 0 on error.
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * WARNING: *MUST* be used with data streams only.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * XXX: Should not be only for metadata stream or else use another name.
 */
static void destroy_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
void lttng_consumer_close_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	/*
	 * This call should NEVER receive a regular stream. It must always be a
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap metadata stream");
			}
		}
		if (stream->wait_fd >= 0) {
			ret = close(stream->wait_fd);
			if (ret < 0) {
				PERROR("close kernel metadata wait_fd");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		if (stream->monitor) {
			/* close the write-side in close_metadata */
			ret = close(stream->ust_metadata_poll_pipe[0]);
			if (ret < 0) {
				PERROR("Close UST metadata read-side poll pipe");
			}
		}
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires to lock the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			consumer_destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

end:
	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for
	 * a NULL pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	call_rcu(&stream->node.head, free_stream_rcu);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow adding redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
/*
 * Thread polls on the metadata file descriptors and writes the data to disk
 * or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	health_code_update();

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
		health_code_update();

		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						PERROR("read metadata stream");
						/*
						 * Continue here to handle the rest of the streams.
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI);
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

error:
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
	destroy_stream_ht(metadata_ht);
end_ht:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
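/*
 * Illustration (not compiled as part of this file): the metadata thread
 * above receives work as raw 'struct lttng_consumer_stream *' pointers
 * written atomically to ctx->consumer_metadata_pipe. A producer side would
 * look roughly like the sketch below; the real hand-off lives in the
 * add-stream paths and may differ in detail.
 *
 *	struct lttng_consumer_stream *stream = ...;	// hypothetical stream
 *	ssize_t ret;
 *
 *	// Writing a NULL pointer only wakes the thread so it re-validates
 *	// global state (see the "A NULL stream means..." branch above).
 *	ret = lttng_pipe_write(ctx->consumer_metadata_pipe,
 *			&stream, sizeof(stream));
 *	if (ret < sizeof(stream)) {
 *		// Hand-off failed: the thread does NOT own the stream.
 *	}
 */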
/*
 * This thread polls the fds in the set to consume the data and write
 * it to tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	health_code_update();

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (data_ht == NULL) {
		/* ENOMEM at this point. Better to bail out. */
		goto end;
	}

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	while (1) {
		health_code_update();

		high_prio = 0;
		num_hup = 0;

		/*
		 * the fds set has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* allocate for all fds + 1 for the consumer_data_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_data_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);

		/* No FDs and consumer_quit set: clean up the thread. */
		if (nb_fd == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}
		/* poll on the array of fds */
restart:
		DBG("polling on %d fd", nb_fd + 1);
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

	destroy_data_stream_ht(data_ht);

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
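/*
 * Layout note (illustrative assumption about update_poll_array()): the data
 * thread above relies on indexes [0, nb_fd - 1] holding one entry per
 * stream and on the extra slot at index nb_fd being the read end of
 * consumer_data_pipe, e.g. with two streams:
 *
 *	pollfd[0] = { .fd = stream A wait_fd,   .events = POLLIN | POLLPRI }
 *	pollfd[1] = { .fd = stream B wait_fd,   .events = POLLIN | POLLPRI }
 *	pollfd[2] = { .fd = data pipe read end, .events = POLLIN | POLLPRI }
 *
 * which is why every poll() call uses nb_fd + 1 entries and pollfd[nb_fd]
 * is checked first, prioritizing array updates over stream reads.
 */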
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
static void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Note: a mutex is taken internally within
			 * liblttng-ust-ctl to protect timer wakeup_fd
			 * use from concurrent close.
			 */
			lttng_ustconsumer_close_stream_wakeup(stream);
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size is set to 1 for the consumer_channel pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
		health_code_update();

		/* Only the channel pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						rcu_read_unlock();
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						struct lttng_consumer_stream *stream, *stmp;

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);
						consumer_close_channel_streams(chan);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							/* Delete streams that might have been left in the stream list. */
							cds_list_for_each_entry_safe(stream, stmp,
									&chan->streams.head, send_node) {
								health_code_update();

								cds_list_del(&stream->send_node);
								lttng_ustconsumer_del_stream(stream);
								uatomic_sub(&stream->chan->refcount, 1);
								assert(&chan->refcount);
								free(stream);
							}
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
	DBG("Channel poll thread exiting");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
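/*
 * Illustration (not compiled): other threads drive the loop above through
 * the channel pipe helpers defined earlier in this file. A hypothetical
 * caller that just allocated a channel would hand it over with:
 *
 *	notify_channel_pipe(ctx, chan, -1, CONSUMER_CHANNEL_ADD);
 *
 * and later request its teardown by key only:
 *
 *	notify_thread_del_channel(ctx, chan->key);
 *
 * The ADD path stores the wait_fd in channel_ht so that the
 * LPOLLERR/LPOLLHUP handler above can map a failing fd back to its channel.
 */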
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	if (lttng_consumer_poll_socket(sockpoll) < 0) {
		ret = -1;
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * structure to poll for incoming data on communication socket avoids
	 * making blocking sockets.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* Prepare the FDs to poll: the client socket and the should_quit pipe. */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;	/* All is OK */
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon which
	 * is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_metadata();

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set, so as to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);
	return ret;
}
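/*
 * Rendezvous note (sketch, not the actual waiter code): the broadcast on
 * stream->metadata_rdv above lets another thread block until a metadata
 * read attempt has completed. Assuming the usual pthread pattern, a waiter
 * would look like:
 *
 *	pthread_mutex_lock(&stream->metadata_rdv_lock);
 *	pthread_cond_wait(&stream->metadata_rdv, &stream->metadata_rdv_lock);
 *	pthread_mutex_unlock(&stream->metadata_rdv_lock);
 */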
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
void lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
}
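/*
 * Usage note (assumption about startup ordering): this must run once,
 * before the polling threads above are spawned, since the command handlers
 * dereference these hash tables without NULL checks. A minimal startup
 * sketch:
 *
 *	lttng_consumer_init();
 *	// ... create the ctx, then pthread_create() the metadata, data,
 *	// channel and sessiond threads with ctx as their argument.
 */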
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret = -ENOMEM;
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		} else {
			relayd->sessiond_session_id = sessiond_id;
			relayd_created = 1;
		}

		/*
		 * This code path MUST continue to the consumer send status message so
		 * we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		ret = -EINTR;
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. Any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If object already
	 * exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
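/*
 * Call-order note (as implied by the asserts above): the control socket
 * must be added first, creating the consumer_relayd_sock_pair, and the data
 * socket second, finding the existing pair. Sketch of the expected sequence
 * for one relayd (other arguments abridged):
 *
 *	ret = consumer_add_relayd_socket(idx, LTTNG_STREAM_CONTROL, ...);
 *	ret = consumer_add_relayd_socket(idx, LTTNG_STREAM_DATA, ...);
 *
 * Both calls require the caller to hold the RCU read-side lock, per the
 * function header.
 */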
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
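/*
 * Illustration: callers use the 0/1 convention to treat a busy mutex as a
 * signal rather than an error, e.g.
 *
 *	if (!stream_try_lock(stream)) {
 *		// Busy: another thread is extracting data right now.
 *	}
 *
 * consumer_data_pending() below relies on exactly this to report pending
 * data without blocking.
 */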
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * An RCU read-side lock MUST be acquired before calling this function and
 * held until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check whether, for a given session id, there is still data needed to be
 * extracted from the buffers.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);

	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}

	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd; report no data pending. */
			goto data_not_pending;
		}
	}

	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/*
			 * An empty output file is not valid. We need at least one packet
			 * generated per stream, even if it contains no event, so it
			 * contains at least one packet header.
			 */
			if (stream->output_written == 0) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}

	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
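/*
 * Usage sketch (hypothetical caller): a sessiond-facing command handler
 * would typically map the 1/0 result straight to a "flush complete?"
 * answer:
 *
 *	if (consumer_data_pending(session_id)) {
 *		// Buffers or relayd still hold data; ask again later.
 *	} else {
 *		// Trace output is complete for this session.
 *	}
 */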
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}

/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Using a maximum stream size with the produced and consumed position of a
 * stream, computes the new consumed position to be as close as possible to the
 * maximum possible stream size.
 *
 * If the maximum stream size is lower than the pending buffer size
 * (produced - consumed), a new consumed position as close as possible to
 * produced_pos is returned; otherwise, the consumed_pos given is returned
 * untouched.
 */
unsigned long consumer_get_consumed_maxsize(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t max_stream_size)
{
	if (max_stream_size && max_stream_size < (produced_pos - consumed_pos)) {
		/* Offset from the produced position to get the latest buffers. */
		return produced_pos - max_stream_size;
	}

	return consumed_pos;
}
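/*
 * Worked example: with consumed_pos = 0, produced_pos = 4096 and
 * max_stream_size = 1024, the backlog (4096 - 0) exceeds the maximum, so
 * the function returns 4096 - 1024 = 3072 and only the most recent 1024
 * bytes get consumed. With max_stream_size = 0 or >= the backlog, the
 * original consumed_pos (0) is returned unchanged.
 */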