/*
 * Copyright (C) 2013 Julien Desfossez <jdesfossez@efficios.com>
 * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
 * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/futex.h>
#include <urcu/rculist.h>
#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/endian.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/fd-tracker/utils.h>
#include <common/fs-handle.h>
#include <common/futex.h>
#include <common/index/index.h>
#include <common/sessiond-comm/inet.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/uri.h>
#include <common/utils.h>
#include <lttng/lttng.h>

#include "connection.h"
#include "ctf-trace.h"
#include "health-relayd.h"
#include "lttng-relayd.h"
#include "testpoint.h"
#include "viewer-session.h"
#include "viewer-stream.h"

#define SESSION_BUF_DEFAULT_COUNT 16
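
/*
 * Note: this is only the initial capacity of the session listing buffer
 * used by viewer_list_sessions(); the buffer is doubled whenever it fills
 * up.
 */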
static struct lttng_uri *live_uri;

/*
 * This pipe is used to inform the worker thread that a command is queued and
 * ready to be processed.
 */
static int live_conn_pipe[2] = { -1, -1 };

/* Shared between threads */
static int live_dispatch_thread_exit;

static pthread_t live_listener_thread;
static pthread_t live_dispatcher_thread;
static pthread_t live_worker_thread;

/*
 * Relay command queue.
 *
 * The live_thread_listener and live_thread_dispatcher communicate with this
 * queue.
 */
static struct relay_conn_queue viewer_conn_queue;

static uint64_t last_relay_viewer_session_id;
static pthread_mutex_t last_relay_viewer_session_id_lock =
		PTHREAD_MUTEX_INITIALIZER;
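
/*
 * Note: the lock above only serializes the increment of
 * last_relay_viewer_session_id performed in viewer_connect(); since the
 * counter is monotonically increasing, each viewer session created by this
 * relayd instance gets a unique id.
 */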
void cleanup_relayd_live(void)
/*
 * Receive a request buffer using a given socket, destination allocated buffer
 * of length size.
 *
 * Return the size of the received message or else a negative value on error
 * with errno being set by recvmsg() syscall.
 */
ssize_t recv_request(struct lttcomm_sock *sock, void *buf, size_t size)

	ret = sock->ops->recvmsg(sock, buf, size, 0);
	if (ret < 0 || ret != size) {
		if (ret == 0) {
			/* Orderly shutdown. Not necessary to print an error. */
			DBG("Socket %d did an orderly shutdown", sock->fd);
		} else {
			ERR("Relay failed to receive request.");
		}
	}
/*
 * Send a response buffer using a given socket, source allocated buffer of
 * length size.
 *
 * Return the size of the sent message or else a negative value on error with
 * errno being set by sendmsg() syscall.
 */
ssize_t send_response(struct lttcomm_sock *sock, void *buf, size_t size)

	ret = sock->ops->sendmsg(sock, buf, size, 0);
	if (ret < 0) {
		ERR("Relayd failed to send response.");
	}
/*
 * Atomically check if new streams got added in one of the sessions attached
 * and reset the flag to 0.
 *
 * Returns 1 if new streams got added, 0 if nothing changed, a negative value
 * on error.
 */
int check_new_streams(struct relay_connection *conn)

	struct relay_session *session;
	unsigned long current_val;

	if (!conn->viewer_session) {

	cds_list_for_each_entry_rcu(session,
			&conn->viewer_session->session_list,
			viewer_session_node) {
		if (!session_get(session)) {

		current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
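		/*
		 * The compare-and-exchange above atomically reads and clears
		 * the new_streams flag, so a given stream addition is
		 * reported to the viewer at most once.
		 */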
		session_put(session);
/*
 * Send viewer streams to the given socket. The ignore_sent_flag indicates if
 * this function should ignore the sent flag or not.
 *
 * Return 0 on success or else a negative value.
 */
ssize_t send_viewer_streams(struct lttcomm_sock *sock,
		uint64_t session_id, unsigned int ignore_sent_flag)

	struct lttng_ht_iter iter;
	struct relay_viewer_stream *vstream;

	cds_lfht_for_each_entry(viewer_streams_ht->ht, &iter.iter, vstream,
			stream_n.node) {
		struct ctf_trace *ctf_trace;
		struct lttng_viewer_stream send_stream = {};

		health_code_update();

		if (!viewer_stream_get(vstream)) {

		pthread_mutex_lock(&vstream->stream->lock);
		/* Ignore if not the same session. */
		if (vstream->stream->trace->session->id != session_id ||
				(!ignore_sent_flag && vstream->sent_flag)) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);

		ctf_trace = vstream->stream->trace;
		send_stream.id = htobe64(vstream->stream->stream_handle);
		send_stream.ctf_trace_id = htobe64(ctf_trace->id);
		send_stream.metadata_flag = htobe32(
				vstream->stream->is_metadata);
		if (lttng_strncpy(send_stream.path_name, vstream->path_name,
				sizeof(send_stream.path_name))) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			ret = -1; /* Error. */

		if (lttng_strncpy(send_stream.channel_name,
				vstream->channel_name,
				sizeof(send_stream.channel_name))) {
			pthread_mutex_unlock(&vstream->stream->lock);
			viewer_stream_put(vstream);
			ret = -1; /* Error. */

		DBG("Sending stream %" PRIu64 " to viewer",
				vstream->stream->stream_handle);
		vstream->sent_flag = 1;
		pthread_mutex_unlock(&vstream->stream->lock);

		ret = send_response(sock, &send_stream, sizeof(send_stream));
		viewer_stream_put(vstream);
/*
 * Create every viewer stream possible for the given session with the seek
 * type. Three counters *can* be returned, which are, in order: the total
 * amount of viewer streams of the session, the number of unsent streams and
 * the number of streams created. Those counters can be NULL and thus will be
 * ignored.
 *
 * The session must be locked to ensure that we see either none or all initial
 * streams for a session, but no intermediate state.
 *
 * Return 0 on success or else a negative value.
 */
static int make_viewer_streams(struct relay_session *session,
		struct lttng_trace_chunk *viewer_trace_chunk,
		enum lttng_viewer_seek seek_t,
		uint32_t *nb_total, uint32_t *nb_unsent,
		uint32_t *nb_created, bool *closed)

	struct lttng_ht_iter iter;
	struct ctf_trace *ctf_trace;

	ASSERT_LOCKED(session->lock);

	if (!viewer_trace_chunk) {
		ERR("Internal error: viewer session associated with session \"%s\" has a NULL trace chunk",
				session->session_name);

	if (session->connection_closed) {

	/*
	 * Create viewer streams for relay streams that are ready to be
	 * used for the given session id only.
	 */
	cds_lfht_for_each_entry(session->ctf_traces_ht->ht, &iter.iter, ctf_trace,
			node.node) {
		bool trace_has_metadata_stream = false;
		struct relay_stream *stream;

		health_code_update();

		if (!ctf_trace_get(ctf_trace)) {

		/*
		 * Iterate over all the streams of the trace to see if we have a
		 * metadata stream.
		 */
		cds_list_for_each_entry_rcu(
				stream, &ctf_trace->stream_list, stream_node)
		{
			if (stream->is_metadata) {
				trace_has_metadata_stream = true;

		/*
		 * If there is no metadata stream in this trace at the moment
		 * and we never sent one to the viewer, skip the trace. We
		 * accept that the viewer will not see this trace at all.
		 */
		if (!trace_has_metadata_stream &&
				!ctf_trace->metadata_stream_sent_to_viewer) {
			ctf_trace_put(ctf_trace);

		cds_list_for_each_entry_rcu(stream, &ctf_trace->stream_list, stream_node) {
			struct relay_viewer_stream *vstream;

			if (!stream_get(stream)) {

			/*
			 * stream published is protected by the session lock.
			 */
			if (!stream->published) {

			vstream = viewer_stream_get_by_id(stream->stream_handle);

				/*
				 * Save that we sent the metadata stream to the
				 * viewer. So that we know what trace the viewer
				 * is aware of.
				 */
				if (stream->is_metadata) {
					ctf_trace->metadata_stream_sent_to_viewer =
							true;

				vstream = viewer_stream_create(stream,
						viewer_trace_chunk, seek_t);

					ctf_trace_put(ctf_trace);

				/* Update number of created stream counter. */

				/*
				 * Ensure a self-reference is preserved even
				 * after we have put our local reference.
				 */
				if (!viewer_stream_get(vstream)) {
					ERR("Unable to get self-reference on viewer stream, logic error.");

				if (!vstream->sent_flag && nb_unsent) {
					/* Update number of unsent stream counter. */

			/* Update number of total stream counter. */
			if (stream->is_metadata) {
				if (!stream->closed ||
						stream->metadata_received > vstream->metadata_sent) {

				if (!stream->closed ||
						!(((int64_t) (stream->prev_data_seq - stream->last_net_seq_num)) >= 0)) {
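				/*
				 * Note: the signed cast above makes the
				 * comparison robust if the unsigned sequence
				 * counters ever wrap: the difference is
				 * interpreted as a signed distance rather
				 * than a huge positive value.
				 */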
			/* Put local reference. */
			viewer_stream_put(vstream);

		ctf_trace_put(ctf_trace);
int relayd_live_stop(void)

	/* Stop dispatch thread */
	CMM_STORE_SHARED(live_dispatch_thread_exit, 1);
	futex_nto1_wake(&viewer_conn_queue.futex);
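	/*
	 * Waking the futex is what lets the dispatcher thread observe the
	 * exit flag set above: that thread is otherwise blocked in
	 * futex_nto1_wait() on the empty connection queue.
	 */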
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int create_named_thread_poll_set(struct lttng_poll_event *events,
		int size, const char *name)

	if (events == NULL || size == 0) {

	ret = fd_tracker_util_poll_create(the_fd_tracker,
			name, events, 1, LTTNG_CLOEXEC);
		PERROR("Failed to create \"%s\" poll file descriptor", name);

	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
int check_thread_quit_pipe(int fd, uint32_t events)

	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
int create_sock(void *data, int *out_fd)

	struct lttcomm_sock *sock = data;

	ret = lttcomm_create_sock(sock);

int close_sock(void *data, int *in_fd)

	struct lttcomm_sock *sock = data;

	return sock->ops->close(sock);

static int accept_sock(void *data, int *out_fd)

	/* Socks is an array of in_sock, out_sock. */
	struct lttcomm_sock **socks = data;
	struct lttcomm_sock *in_sock = socks[0];

	socks[1] = in_sock->ops->accept(in_sock);

	*out_fd = socks[1]->fd;
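
/*
 * Note: create_sock(), close_sock() and accept_sock() appear to follow the
 * fd-tracker callback convention used in this file: the tracker invokes
 * them while accounting for file descriptors, and the resulting fd is
 * reported back through the out_fd/in_fd parameters.
 */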
struct lttcomm_sock *accept_live_sock(struct lttcomm_sock *listening_sock,
		const char *name)

	struct lttcomm_sock *socks[2] = { listening_sock, NULL };
	struct lttcomm_sock *new_sock = NULL;

	ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &out_fd,
			(const char **) &name, 1, accept_sock, &socks);

	DBG("%s accepted, socket %d", name, new_sock->fd);
/*
 * Create and init socket from uri.
 */
struct lttcomm_sock *init_socket(struct lttng_uri *uri, const char *name)

	struct lttcomm_sock *sock = NULL;
	char uri_str[LTTNG_PATH_MAX];
	char *formated_name = NULL;

	sock = lttcomm_alloc_sock_from_uri(uri);
	if (sock == NULL) {
		ERR("Allocating socket");

	/*
	 * Don't fail to create the socket if the name can't be built as it is
	 * only used for debugging purposes.
	 */
	ret = uri_to_str_url(uri, uri_str, sizeof(uri_str));
	uri_str[sizeof(uri_str) - 1] = '\0';

	ret = asprintf(&formated_name, "%s socket @ %s", name,
			uri_str);
		formated_name = NULL;

	ret = fd_tracker_open_unsuspendable_fd(the_fd_tracker, &sock_fd,
			(const char **) (formated_name ? &formated_name : NULL),
			1, create_sock, sock);
		PERROR("Failed to create \"%s\" socket",
				formated_name ?: "Unknown");

	DBG("Listening on %s socket %d", name, sock->fd);

	ret = sock->ops->bind(sock);
		PERROR("Failed to bind lttng-live socket");

	ret = sock->ops->listen(sock, -1);

	lttcomm_destroy_sock(sock);
/*
 * This thread manages the listening for new connections on the network
 */
void *thread_listener(void *data)

	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_sock *live_control_sock;

	DBG("[thread] Relay live listener started");

	rcu_register_thread();
	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_LISTENER);

	health_code_update();

	live_control_sock = init_socket(live_uri, "Live listener");
	if (!live_control_sock) {
		goto error_sock_control;

	/* Pass 2 as size here for the thread quit pipe and control sockets. */
	ret = create_named_thread_poll_set(&events, 2,
			"Live listener thread epoll");
		goto error_create_poll;

	/* Add the control socket */
	ret = lttng_poll_add(&events, live_control_sock->fd, LPOLLIN | LPOLLRDHUP);

	lttng_relay_notify_ready();

	if (testpoint(relayd_thread_live_listener)) {
		goto error_testpoint;

	health_code_update();

	DBG("Listener accepting live viewers connections");
		ret = lttng_poll_wait(&events, -1);

			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {

		DBG("Relay new viewer connection received");
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);

			if (revents & LPOLLIN) {
				/*
				 * A new connection is requested, therefore a
				 * viewer connection is allocated in this
				 * thread, enqueued to a global queue and
				 * dequeued (and freed) in the worker thread.
				 */
				struct relay_connection *new_conn;
				struct lttcomm_sock *newsock;

				newsock = accept_live_sock(live_control_sock,
						"Live socket to client");
					PERROR("accepting control sock");

				DBG("Relay viewer connection accepted socket %d", newsock->fd);

				ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
						sizeof(val));
					PERROR("setsockopt inet");
					lttcomm_destroy_sock(newsock);

				new_conn = connection_create(newsock, RELAY_CONNECTION_UNKNOWN);
					lttcomm_destroy_sock(newsock);
				/* Ownership assumed by the connection. */

				/* Enqueue request for the dispatcher thread. */
				cds_wfcq_enqueue(&viewer_conn_queue.head, &viewer_conn_queue.tail,
						&new_conn->qnode);

				/*
				 * Wake the dispatch queue futex.
				 * Implicit memory barrier with the
				 * exchange in cds_wfcq_enqueue.
				 */
				futex_nto1_wake(&viewer_conn_queue.futex);
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("socket poll error");
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
	(void) fd_tracker_util_poll_clean(the_fd_tracker, &events);

	if (live_control_sock->fd >= 0) {
		int sock_fd = live_control_sock->fd;

		ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker,
				&sock_fd, 1, close_sock,
				live_control_sock);

		live_control_sock->fd = -1;

	lttcomm_destroy_sock(live_control_sock);

	DBG("Live viewer listener thread exited with error");

	health_unregister(health_relayd);
	rcu_unregister_thread();
	DBG("Live viewer listener thread cleanup complete");
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");
/*
 * This thread manages the dispatching of the requests to worker threads
 */
void *thread_dispatcher(void *data)

	struct cds_wfcq_node *node;
	struct relay_connection *conn = NULL;

	DBG("[thread] Live viewer relay dispatcher started");

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_DISPATCHER);

	if (testpoint(relayd_thread_live_dispatcher)) {
		goto error_testpoint;

	health_code_update();

		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&viewer_conn_queue.futex);

		if (CMM_LOAD_SHARED(live_dispatch_thread_exit)) {

			health_code_update();

			/* Dequeue commands */
			node = cds_wfcq_dequeue_blocking(&viewer_conn_queue.head,
					&viewer_conn_queue.tail);
				DBG("Woken up but nothing in the live-viewer "
						"relay command queue");
				/* Continue thread execution */

			conn = caa_container_of(node, struct relay_connection, qnode);
805 DBG("Dispatching viewer request waiting on sock %d",
809 * Inform worker thread of the new request. This
810 * call is blocking so we can be assured that
811 * the data will be read at some point in time
812 * or wait to the end of the world :)
814 ret
= lttng_write(live_conn_pipe
[1], &conn
, sizeof(conn
));
816 PERROR("write conn pipe");
817 connection_put(conn
);
820 } while (node
!= NULL
);
822 /* Futex wait on queue. Blocking call on futex() */
824 futex_nto1_wait(&viewer_conn_queue
.futex
);
828 /* Normal exit, no error */
835 ERR("Health error occurred in %s", __func__
);
837 health_unregister(health_relayd
);
838 DBG("Live viewer dispatch thread dying");
839 if (lttng_relay_stop_threads()) {
840 ERR("Error stopping threads");
/*
 * Establish connection with the viewer and check the versions.
 *
 * Return 0 on success or else negative value.
 */
int viewer_connect(struct relay_connection *conn)

	struct lttng_viewer_connect reply, msg;

	conn->version_check_done = 1;

	health_code_update();

	DBG("Viewer is establishing a connection to the relayd.");

	ret = recv_request(conn->sock, &msg, sizeof(msg));

	health_code_update();

	memset(&reply, 0, sizeof(reply));
	reply.major = RELAYD_VERSION_COMM_MAJOR;
	reply.minor = RELAYD_VERSION_COMM_MINOR;

	/* Major versions must be the same */
	if (reply.major != be32toh(msg.major)) {
		DBG("Incompatible major versions ([relayd] %u vs [client] %u)",
				reply.major, be32toh(msg.major));

	conn->major = reply.major;
	/* We adapt to the lowest compatible version */
	if (reply.minor <= be32toh(msg.minor)) {
		conn->minor = reply.minor;
	} else {
		conn->minor = be32toh(msg.minor);
	}
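	/*
	 * For example (illustrative values): if the relayd speaks protocol
	 * 2.12 and the viewer announces 2.8, both sides proceed with minor
	 * version 8; if the viewer announces 2.15, the relayd's own minor,
	 * 12, is used instead.
	 */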
	if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_COMMAND) {
		conn->type = RELAY_VIEWER_COMMAND;
	} else if (be32toh(msg.type) == LTTNG_VIEWER_CLIENT_NOTIFICATION) {
		conn->type = RELAY_VIEWER_NOTIFICATION;

		ERR("Unknown connection type: %u", be32toh(msg.type));

	reply.major = htobe32(reply.major);
	reply.minor = htobe32(reply.minor);
	if (conn->type == RELAY_VIEWER_COMMAND) {
		/*
		 * Increment outside of htobe64 macro, because the argument can
		 * be used more than once within the macro, and thus the
		 * operation may be undefined.
		 */
		pthread_mutex_lock(&last_relay_viewer_session_id_lock);
		last_relay_viewer_session_id++;
		pthread_mutex_unlock(&last_relay_viewer_session_id_lock);
		reply.viewer_session_id = htobe64(last_relay_viewer_session_id);

	health_code_update();

	ret = send_response(conn->sock, &reply, sizeof(reply));

	health_code_update();

	DBG("Version check done using protocol %u.%u", conn->major, conn->minor);
/*
 * Send the viewer the list of current sessions.
 * We need to create a copy of the hash table content because otherwise
 * we cannot assume the number of entries stays the same between getting
 * the number of HT elements and iteration over the HT.
 *
 * Return 0 on success or else a negative value.
 */
int viewer_list_sessions(struct relay_connection *conn)

	struct lttng_viewer_list_sessions session_list;
	struct lttng_ht_iter iter;
	struct relay_session *session;
	struct lttng_viewer_session *send_session_buf = NULL;
	uint32_t buf_count = SESSION_BUF_DEFAULT_COUNT;

	DBG("List sessions received");

	send_session_buf = zmalloc(SESSION_BUF_DEFAULT_COUNT * sizeof(*send_session_buf));
	if (!send_session_buf) {

	cds_lfht_for_each_entry(sessions_ht->ht, &iter.iter, session,
			session_n.node) {
		struct lttng_viewer_session *send_session;

		health_code_update();

		pthread_mutex_lock(&session->lock);
		if (session->connection_closed) {
			/* Skip closed session */

		if (!session->current_trace_chunk) {
			/*
			 * Skip un-attachable session. It is either
			 * being destroyed or has not had a trace
			 * chunk created against it yet.
			 */

		if (count >= buf_count) {
			struct lttng_viewer_session *newbuf;
			uint32_t new_buf_count = buf_count << 1;
			newbuf = realloc(send_session_buf,
					new_buf_count * sizeof(*send_session_buf));

			send_session_buf = newbuf;
			buf_count = new_buf_count;

		send_session = &send_session_buf[count];
		if (lttng_strncpy(send_session->session_name,
				session->session_name,
				sizeof(send_session->session_name))) {

		if (lttng_strncpy(send_session->hostname, session->hostname,
				sizeof(send_session->hostname))) {

		send_session->id = htobe64(session->id);
		send_session->live_timer = htobe32(session->live_timer);
		if (session->viewer_attached) {
			send_session->clients = htobe32(1);
		} else {
			send_session->clients = htobe32(0);
		}
		send_session->streams = htobe32(session->stream_count);

		pthread_mutex_unlock(&session->lock);

		pthread_mutex_unlock(&session->lock);

	session_list.sessions_count = htobe32(count);

	health_code_update();

	ret = send_response(conn->sock, &session_list, sizeof(session_list));

	health_code_update();

	ret = send_response(conn->sock, send_session_buf,
			count * sizeof(*send_session_buf));

	health_code_update();

	free(send_session_buf);
/*
 * Send the viewer the list of current streams.
 */
int viewer_get_new_streams(struct relay_connection *conn)

	int ret, send_streams = 0;
	uint32_t nb_created = 0, nb_unsent = 0, nb_streams = 0, nb_total = 0;
	struct lttng_viewer_new_streams_request request;
	struct lttng_viewer_new_streams_response response;
	struct relay_session *session = NULL;
	uint64_t session_id;
	bool closed = false;

	DBG("Get new streams received");

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));

	session_id = be64toh(request.session_id);

	health_code_update();

	memset(&response, 0, sizeof(response));

	session = session_get_by_id(session_id);
	if (!session) {
		DBG("Relay session %" PRIu64 " not found", session_id);
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);

	if (!viewer_session_is_attached(conn->viewer_session, session)) {
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_ERR);

	pthread_mutex_lock(&session->lock);
	ret = make_viewer_streams(session,
			conn->viewer_session->current_trace_chunk,
			LTTNG_VIEWER_SEEK_LAST, &nb_total, &nb_unsent,
			&nb_created, &closed);
		goto error_unlock_session;

	response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_OK);

	/* Only send back the newly created streams with the unsent ones. */
	nb_streams = nb_created + nb_unsent;
	response.streams_count = htobe32(nb_streams);

	/*
	 * If the session is closed, HUP when there are no more streams
	 * with data.
	 */
	if (closed && nb_total == 0) {
		response.streams_count = 0;
		response.status = htobe32(LTTNG_VIEWER_NEW_STREAMS_HUP);
		goto send_reply_unlock;

	pthread_mutex_unlock(&session->lock);

	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
		goto end_put_session;

	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer
	 * knows what is happening.
	 */
	if (!send_streams || !nb_streams) {
		goto end_put_session;

	/*
	 * Send stream and *DON'T* ignore the sent flag so every viewer
	 * streams that were not sent from that point will be sent to
	 * the viewer.
	 */
	ret = send_viewer_streams(conn->sock, session_id, 0);
		goto end_put_session;

	session_put(session);

error_unlock_session:
	pthread_mutex_unlock(&session->lock);
	session_put(session);
/*
 * Send the viewer the list of current sessions.
 */
int viewer_attach_session(struct relay_connection *conn)

	int send_streams = 0;
	uint32_t nb_streams = 0;
	enum lttng_viewer_seek seek_type;
	struct lttng_viewer_attach_session_request request;
	struct lttng_viewer_attach_session_response response;
	struct relay_session *session = NULL;
	enum lttng_viewer_attach_return_code viewer_attach_status;
	bool closed = false;
	uint64_t session_id;

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));

	session_id = be64toh(request.session_id);
	health_code_update();

	memset(&response, 0, sizeof(response));

	if (!conn->viewer_session) {
		DBG("Client trying to attach before creating a live viewer session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_NO_SESSION);

	session = session_get_by_id(session_id);
	if (!session) {
		DBG("Relay session %" PRIu64 " not found", session_id);
		response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);

	DBG("Attach session ID %" PRIu64 " received", session_id);

	pthread_mutex_lock(&session->lock);
	if (!session->current_trace_chunk) {
		/*
		 * Session is either being destroyed or it never had a trace
		 * chunk created against it.
		 */
		DBG("Session requested by live client has no current trace chunk, returning unknown session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);

	if (session->live_timer == 0) {
		DBG("Not live session");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_NOT_LIVE);

	viewer_attach_status = viewer_session_attach(conn->viewer_session,
			session);
	if (viewer_attach_status != LTTNG_VIEWER_ATTACH_OK) {
		response.status = htobe32(viewer_attach_status);

	switch (be32toh(request.seek)) {
	case LTTNG_VIEWER_SEEK_BEGINNING:
	case LTTNG_VIEWER_SEEK_LAST:
		response.status = htobe32(LTTNG_VIEWER_ATTACH_OK);
		seek_type = be32toh(request.seek);
	default:
		ERR("Wrong seek parameter");
		response.status = htobe32(LTTNG_VIEWER_ATTACH_SEEK_ERR);

	ret = make_viewer_streams(session,
			conn->viewer_session->current_trace_chunk, seek_type,
			&nb_streams, NULL, NULL, &closed);
		goto end_put_session;

	pthread_mutex_unlock(&session->lock);
	session_put(session);

	response.streams_count = htobe32(nb_streams);
	/*
	 * If the session is closed when the viewer is attaching, it
	 * means some of the streams may have been concurrently removed,
	 * so we don't allow the viewer to attach, even if there are
	 * streams available.
	 */
		response.streams_count = 0;
		response.status = htobe32(LTTNG_VIEWER_ATTACH_UNK);

	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));
		goto end_put_session;

	health_code_update();

	/*
	 * Unknown or empty session, just return gracefully, the viewer
	 * knows what is happening.
	 */
	if (!send_streams || !nb_streams) {
		goto end_put_session;

	/* Send stream and ignore the sent flag. */
	ret = send_viewer_streams(conn->sock, session_id, 1);
		goto end_put_session;

	pthread_mutex_unlock(&session->lock);
	session_put(session);
/*
 * Open the index file if needed for the given vstream.
 *
 * If an index file is successfully opened, the vstream will set it as its
 * current index file.
 *
 * Return 0 on success, a negative value on error (-ENOENT if not ready yet).
 *
 * Called with rstream lock held.
 */
static int try_open_index(struct relay_viewer_stream *vstream,
		struct relay_stream *rstream)

	const uint32_t connection_major = rstream->trace->session->major;
	const uint32_t connection_minor = rstream->trace->session->minor;
	enum lttng_trace_chunk_status chunk_status;

	if (vstream->index_file) {

	/*
	 * First time, we open the index file and at least one index is ready.
	 */
	if (rstream->index_received_seqcount == 0) {

	chunk_status = lttng_index_file_create_from_trace_chunk_read_only(
			vstream->stream_file.trace_chunk, rstream->path_name,
			rstream->channel_name, rstream->tracefile_size,
			vstream->current_tracefile_id,
			lttng_to_index_major(connection_major, connection_minor),
			lttng_to_index_minor(connection_major, connection_minor),
			true, &vstream->index_file);
	if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
		if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE) {
/*
 * Check the status of the index for the given stream. This function
 * updates the index structure if needed and can put (close) the vstream
 * in the HUP situation.
 *
 * Return 0 means that we can proceed with the index. A value of 1 means
 * that the index has been updated and is ready to be sent to the
 * client. A negative value indicates an error that can't be handled.
 *
 * Called with rstream lock held.
 */
static int check_index_status(struct relay_viewer_stream *vstream,
		struct relay_stream *rstream, struct ctf_trace *trace,
		struct lttng_viewer_index *index)

	DBG("Check index status: index_received_seqcount %" PRIu64 " "
			"index_sent_seqcount %" PRIu64 " "
			"for stream %" PRIu64,
			rstream->index_received_seqcount,
			vstream->index_sent_seqcount,
			vstream->stream->stream_handle);
	if ((trace->session->connection_closed || rstream->closed)
			&& rstream->index_received_seqcount
				== vstream->index_sent_seqcount) {
		/*
		 * Last index sent and session connection or relay
		 * stream are closed.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);
	} else if (rstream->beacon_ts_end != -1ULL &&
			(rstream->index_received_seqcount == 0 ||
			(vstream->index_sent_seqcount != 0 &&
			rstream->index_received_seqcount
				<= vstream->index_sent_seqcount))) {
		/*
		 * We've received a synchronization beacon and the last index
		 * available has been sent, the index for now is inactive.
		 *
		 * In this case, we have received a beacon which allows us to
		 * inform the client of a time interval during which we can
		 * guarantee that there are no events to read (and never will
		 * be).
		 *
		 * The sent seqcount can grow higher than receive seqcount on
		 * clear because the rotation performed by clear will push
		 * the index_sent_seqcount ahead (see
		 * viewer_stream_sync_tracefile_array_tail) and skip over
		 * packet sequence numbers.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_INACTIVE);
		index->timestamp_end = htobe64(rstream->beacon_ts_end);
		index->stream_id = htobe64(rstream->ctf_stream_id);
		DBG("Check index status: inactive with beacon, for stream %" PRIu64,
				vstream->stream->stream_handle);
	} else if (rstream->index_received_seqcount == 0 ||
			(vstream->index_sent_seqcount != 0 &&
			rstream->index_received_seqcount
				<= vstream->index_sent_seqcount)) {
		/*
		 * This checks whether received <= sent seqcount. In
		 * this case, we have not received a beacon. Therefore,
		 * we can only ask the client to retry later.
		 *
		 * The sent seqcount can grow higher than receive seqcount on
		 * clear because the rotation performed by clear will push
		 * the index_sent_seqcount ahead (see
		 * viewer_stream_sync_tracefile_array_tail) and skip over
		 * packet sequence numbers.
		 */
		index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
		DBG("Check index status: retry for stream %" PRIu64,
				vstream->stream->stream_handle);
	} else if (!tracefile_array_seq_in_file(rstream->tfa,
			vstream->current_tracefile_id,
			vstream->index_sent_seqcount)) {
		/*
		 * The next index we want to send cannot be read either
		 * because we need to perform a rotation, or due to
		 * the producer having overwritten its trace file.
		 */
		DBG("Viewer stream %" PRIu64 " rotation",
				vstream->stream->stream_handle);
		ret = viewer_stream_rotate(vstream);
			/* EOF across entire stream. */
			index->status = htobe32(LTTNG_VIEWER_INDEX_HUP);

		/*
		 * If we have been pushed due to overwrite, it
		 * necessarily means there is data that can be read in
		 * the stream. If we rotated because we reached the end
		 * of a tracefile, it means the following tracefile
		 * needs to contain at least one index, else we would
		 * have already returned LTTNG_VIEWER_INDEX_RETRY to the
		 * viewer. The updated index_sent_seqcount needs to
		 * point to a readable index entry now.
		 *
		 * In the case where we "rotate" on a single file, we
		 * can end up in a case where the requested index is
		 * still unavailable.
		 */
		if (rstream->tracefile_count == 1 &&
				!tracefile_array_seq_in_file(
					rstream->tfa,
					vstream->current_tracefile_id,
					vstream->index_sent_seqcount)) {
			index->status = htobe32(LTTNG_VIEWER_INDEX_RETRY);
			DBG("Check index status: retry: "
					"tracefile array sequence number %" PRIu64
					" not in file for stream %" PRIu64,
					vstream->index_sent_seqcount,
					vstream->stream->stream_handle);

		assert(tracefile_array_seq_in_file(rstream->tfa,
				vstream->current_tracefile_id,
				vstream->index_sent_seqcount));
	/* ret == 0 means successful so we continue. */

	viewer_stream_put(vstream);
/*
 * Send the next index for a stream.
 *
 * Return 0 on success or else a negative value.
 */
int viewer_get_next_index(struct relay_connection *conn)

	struct lttng_viewer_get_next_index request_index;
	struct lttng_viewer_index viewer_index;
	struct ctf_packet_index packet_index;
	struct relay_viewer_stream *vstream = NULL;
	struct relay_stream *rstream = NULL;
	struct ctf_trace *ctf_trace = NULL;
	struct relay_viewer_stream *metadata_viewer_stream = NULL;

	DBG("Viewer get next index");

	memset(&viewer_index, 0, sizeof(viewer_index));
	health_code_update();

	ret = recv_request(conn->sock, &request_index, sizeof(request_index));

	health_code_update();

	vstream = viewer_stream_get_by_id(be64toh(request_index.stream_id));
	if (!vstream) {
		DBG("Client requested index of unknown stream id %" PRIu64,
				(uint64_t) be64toh(request_index.stream_id));
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);

	/* Use back. ref. Protected by refcounts. */
	rstream = vstream->stream;
	ctf_trace = rstream->trace;

	/* metadata_viewer_stream may be NULL. */
	metadata_viewer_stream =
			ctf_trace_get_viewer_metadata_stream(ctf_trace);

	pthread_mutex_lock(&rstream->lock);

	/*
	 * The viewer should not ask for index on metadata stream.
	 */
	if (rstream->is_metadata) {
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);

	if (rstream->ongoing_rotation.is_set) {
		/* Rotation is ongoing, try again later. */
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);

	if (rstream->trace->session->ongoing_rotation) {
		/* Rotation is ongoing, try again later. */
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);

	if (rstream->trace_chunk && !lttng_trace_chunk_ids_equal(
			conn->viewer_session->current_trace_chunk,
			rstream->trace_chunk)) {
		DBG("Relay stream and viewer chunk ids differ");

		ret = viewer_session_set_trace_chunk_copy(
				conn->viewer_session,
				rstream->trace_chunk);
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);

	if (conn->viewer_session->current_trace_chunk !=
			vstream->stream_file.trace_chunk) {
		bool acquired_reference;

		DBG("Viewer session and viewer stream chunk differ: "
				"vsession chunk %p vstream chunk %p",
				conn->viewer_session->current_trace_chunk,
				vstream->stream_file.trace_chunk);
		lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
		acquired_reference = lttng_trace_chunk_get(conn->viewer_session->current_trace_chunk);
		assert(acquired_reference);
		vstream->stream_file.trace_chunk =
				conn->viewer_session->current_trace_chunk;
		viewer_stream_sync_tracefile_array_tail(vstream);
		viewer_stream_close_files(vstream);
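	/*
	 * From this point on, the viewer stream references the viewer
	 * session's current chunk: the tracefile array tail was
	 * resynchronized and the previously opened index/data files were
	 * closed, so subsequent reads lazily reopen them from the new chunk.
	 */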
	ret = check_index_status(vstream, rstream, ctf_trace, &viewer_index);

	} else if (ret == 1) {
		/*
		 * We have no index to send and check_index_status has populated
		 * viewer_index's status.
		 */

	/* At this point, ret is 0 thus we will be able to read the index. */

	/* Try to open an index if one is needed for that stream. */
	ret = try_open_index(vstream, rstream);
	if (ret == -ENOENT) {
		if (rstream->closed) {
			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);

			viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_RETRY);

		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);

	/*
	 * vstream->stream_fd may be NULL if it has been closed by
	 * tracefile rotation, or if we are at the beginning of the
	 * stream. We open the data stream file here to protect against
	 * overwrite caused by tracefile rotation (in association with
	 * unlink performed before overwrite).
	 */
	if (!vstream->stream_file.handle) {
		char file_path[LTTNG_PATH_MAX];
		enum lttng_trace_chunk_status status;
		struct fs_handle *fs_handle;

		ret = utils_stream_file_path(rstream->path_name,
				rstream->channel_name, rstream->tracefile_size,
				vstream->current_tracefile_id, NULL, file_path,
				sizeof(file_path));

		/*
		 * It is possible that the file we are trying to open is
		 * missing if the stream has been closed (application exits with
		 * per-pid buffers) and a clear command has been performed.
		 */
		status = lttng_trace_chunk_open_fs_handle(
				vstream->stream_file.trace_chunk,
				file_path, O_RDONLY, 0, &fs_handle, true);
		if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			if (status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE &&
					rstream->closed) {
				viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_HUP);

			PERROR("Failed to open trace file for viewer stream");

		vstream->stream_file.handle = fs_handle;

	ret = check_new_streams(conn);
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);
	} else if (ret == 1) {
		viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_STREAM;

	ret = lttng_index_file_read(vstream->index_file, &packet_index);
		ERR("Relay error reading index file");
		viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_ERR);

	viewer_index.status = htobe32(LTTNG_VIEWER_INDEX_OK);
	vstream->index_sent_seqcount++;
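	/*
	 * index_sent_seqcount tracks how many indexes this viewer stream has
	 * consumed; check_index_status() compares it against the relay
	 * stream's index_received_seqcount to decide between OK, RETRY and
	 * HUP on the next request.
	 */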
	/*
	 * Indexes are stored in big endian, no need to switch before sending.
	 */
	DBG("Sending viewer index for stream %" PRIu64 " offset %" PRIu64,
			rstream->stream_handle,
			(uint64_t) be64toh(packet_index.offset));
	viewer_index.offset = packet_index.offset;
	viewer_index.packet_size = packet_index.packet_size;
	viewer_index.content_size = packet_index.content_size;
	viewer_index.timestamp_begin = packet_index.timestamp_begin;
	viewer_index.timestamp_end = packet_index.timestamp_end;
	viewer_index.events_discarded = packet_index.events_discarded;
	viewer_index.stream_id = packet_index.stream_id;

	pthread_mutex_unlock(&rstream->lock);

	if (metadata_viewer_stream) {
		pthread_mutex_lock(&metadata_viewer_stream->stream->lock);
		DBG("get next index metadata check: recv %" PRIu64
				" sent %" PRIu64,
				metadata_viewer_stream->stream->metadata_received,
				metadata_viewer_stream->metadata_sent);
		if (!metadata_viewer_stream->stream->metadata_received ||
				metadata_viewer_stream->stream->metadata_received >
					metadata_viewer_stream->metadata_sent) {
			viewer_index.flags |= LTTNG_VIEWER_FLAG_NEW_METADATA;

		pthread_mutex_unlock(&metadata_viewer_stream->stream->lock);

	viewer_index.flags = htobe32(viewer_index.flags);
	health_code_update();

	ret = send_response(conn->sock, &viewer_index, sizeof(viewer_index));

	health_code_update();

	DBG("Index %" PRIu64 " for stream %" PRIu64 " sent",
			vstream->index_sent_seqcount,
			vstream->stream->stream_handle);

	if (metadata_viewer_stream) {
		viewer_stream_put(metadata_viewer_stream);

	viewer_stream_put(vstream);

	pthread_mutex_unlock(&rstream->lock);
	if (metadata_viewer_stream) {
		viewer_stream_put(metadata_viewer_stream);

	viewer_stream_put(vstream);
/*
 * Send the requested data packet for a stream.
 *
 * Return 0 on success or else a negative value.
 */
int viewer_get_packet(struct relay_connection *conn)

	struct lttng_viewer_get_packet get_packet_info;
	struct lttng_viewer_trace_packet reply_header;
	struct relay_viewer_stream *vstream = NULL;
	uint32_t reply_size = sizeof(reply_header);
	uint32_t packet_data_len = 0;

	DBG2("Relay get data packet");

	health_code_update();

	ret = recv_request(conn->sock, &get_packet_info,
			sizeof(get_packet_info));

	health_code_update();

	/* From this point on, the error label can be reached. */
	memset(&reply_header, 0, sizeof(reply_header));
	stream_id = (uint64_t) be64toh(get_packet_info.stream_id);

	vstream = viewer_stream_get_by_id(stream_id);
	if (!vstream) {
		DBG("Client requested packet of unknown stream id %" PRIu64,
				stream_id);
		reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);
		goto send_reply_nolock;

	packet_data_len = be32toh(get_packet_info.len);
	reply_size += packet_data_len;
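	/*
	 * The reply buffer holds the header followed by the packet payload,
	 * so a single send covers both: the header is copied at offset 0
	 * and the packet data is read at offset sizeof(reply_header).
	 */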
	reply = zmalloc(reply_size);
		PERROR("packet reply zmalloc");
		reply_size = sizeof(reply_header);

	pthread_mutex_lock(&vstream->stream->lock);
	lseek_ret = fs_handle_seek(vstream->stream_file.handle,
			be64toh(get_packet_info.offset), SEEK_SET);
	if (lseek_ret < 0) {
		PERROR("Failed to seek file system handle of viewer stream %" PRIu64
				" to offset %" PRIu64,
				stream_id,
				(uint64_t) be64toh(get_packet_info.offset));

	read_len = fs_handle_read(vstream->stream_file.handle,
			reply + sizeof(reply_header), packet_data_len);
	if (read_len < packet_data_len) {
		PERROR("Failed to read from file system handle of viewer stream id %" PRIu64
				", offset: %" PRIu64,
				stream_id,
				(uint64_t) be64toh(get_packet_info.offset));

	reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_OK);
	reply_header.len = htobe32(packet_data_len);

	reply_header.status = htobe32(LTTNG_VIEWER_GET_PACKET_ERR);

	pthread_mutex_unlock(&vstream->stream->lock);

	health_code_update();

	memcpy(reply, &reply_header, sizeof(reply_header));
	ret = send_response(conn->sock, reply, reply_size);

		/* No reply to send. */
		ret = send_response(conn->sock, &reply_header,
				reply_size);

	health_code_update();
		PERROR("sendmsg of packet data failed");

	DBG("Sent %u bytes for stream %" PRIu64, reply_size, stream_id);

	viewer_stream_put(vstream);
/*
 * Send the session's metadata.
 *
 * Return 0 on success else a negative value.
 */
int viewer_get_metadata(struct relay_connection *conn)

	struct lttng_viewer_get_metadata request;
	struct lttng_viewer_metadata_packet reply;
	struct relay_viewer_stream *vstream = NULL;

	DBG("Relay get metadata");

	health_code_update();

	ret = recv_request(conn->sock, &request, sizeof(request));

	health_code_update();

	memset(&reply, 0, sizeof(reply));

	vstream = viewer_stream_get_by_id(be64toh(request.stream_id));
	if (!vstream) {
		/*
		 * The metadata stream can be closed by a CLOSE command
		 * just before we attach. It can also be closed by
		 * per-pid tracing during tracing. Therefore, it is
		 * possible that we cannot find this viewer stream.
		 * Reply back to the client with an error if we cannot
		 * find it.
		 */
		DBG("Client requested metadata of unknown stream id %" PRIu64,
				(uint64_t) be64toh(request.stream_id));
		reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);

	pthread_mutex_lock(&vstream->stream->lock);
	if (!vstream->stream->is_metadata) {
		ERR("Invalid metadata stream");

	if (vstream->metadata_sent >= vstream->stream->metadata_received) {
		/*
		 * The live viewers expect to receive a NO_NEW_METADATA
		 * status before a stream disappears, otherwise they abort the
		 * entire live connection when receiving an error status.
		 *
		 * Clear feature resets the metadata_sent to 0 until the
		 * same metadata is received again.
		 */
		reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
		/*
		 * The live viewer considers a closed 0 byte metadata stream as
		 * an error.
		 */
		if (vstream->metadata_sent > 0) {
			vstream->stream->no_new_metadata_notified = true;
			if (vstream->stream->closed) {
				/* Release ownership for the viewer metadata stream. */
				viewer_stream_put(vstream);
	if (vstream->stream->trace_chunk &&
			!lttng_trace_chunk_ids_equal(
				conn->viewer_session->current_trace_chunk,
				vstream->stream->trace_chunk)) {
		/* A rotation has occurred on the relay stream. */
		DBG("Metadata relay stream and viewer chunk ids differ");

		ret = viewer_session_set_trace_chunk_copy(
				conn->viewer_session,
				vstream->stream->trace_chunk);
			reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);

	if (conn->viewer_session->current_trace_chunk !=
			vstream->stream_file.trace_chunk) {
		bool acquired_reference;

		DBG("Viewer session and viewer stream chunk differ: "
				"vsession chunk %p vstream chunk %p",
				conn->viewer_session->current_trace_chunk,
				vstream->stream_file.trace_chunk);
		lttng_trace_chunk_put(vstream->stream_file.trace_chunk);
		acquired_reference = lttng_trace_chunk_get(conn->viewer_session->current_trace_chunk);
		assert(acquired_reference);
		vstream->stream_file.trace_chunk =
				conn->viewer_session->current_trace_chunk;
		viewer_stream_close_files(vstream);

	len = vstream->stream->metadata_received - vstream->metadata_sent;
	/*
	 * Either this is the first time the metadata file is read, or a
	 * rotation of the corresponding relay stream has occurred.
	 */
	if (!vstream->stream_file.handle && len > 0) {
		struct fs_handle *fs_handle;
		char file_path[LTTNG_PATH_MAX];
		enum lttng_trace_chunk_status status;
		struct relay_stream *rstream = vstream->stream;

		ret = utils_stream_file_path(rstream->path_name,
				rstream->channel_name, rstream->tracefile_size,
				vstream->current_tracefile_id, NULL, file_path,
				sizeof(file_path));

		/*
		 * It is possible that the metadata file we are trying to open is
		 * missing if the stream has been closed (application exits with
		 * per-pid buffers) and a clear command has been performed.
		 */
		status = lttng_trace_chunk_open_fs_handle(
				vstream->stream_file.trace_chunk,
				file_path, O_RDONLY, 0, &fs_handle, true);
		if (status != LTTNG_TRACE_CHUNK_STATUS_OK) {
			if (status == LTTNG_TRACE_CHUNK_STATUS_NO_FILE) {
				reply.status = htobe32(LTTNG_VIEWER_NO_NEW_METADATA);
				if (vstream->stream->closed) {
					viewer_stream_put(vstream);

			PERROR("Failed to open metadata file for viewer stream");

		vstream->stream_file.handle = fs_handle;

		if (vstream->metadata_sent != 0) {
			/*
			 * The client does not expect to receive any metadata
			 * it has received and metadata files in successive
			 * chunks must be a strict superset of one another.
			 *
			 * Skip the first `metadata_sent` bytes to ensure
			 * they are not sent a second time to the client.
			 *
			 * Barring a block layer error or an internal error,
			 * this seek should not fail as
			 * `vstream->stream->metadata_received` is reset when
			 * a relay stream is rotated. If this is reached, it is
			 * safe to assume that
			 * `metadata_received` > `metadata_sent`.
			 */
			const off_t seek_ret = fs_handle_seek(fs_handle,
					vstream->metadata_sent, SEEK_SET);

				PERROR("Failed to seek metadata viewer stream file to `sent` position: pos = %" PRId64,
						vstream->metadata_sent);
				reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);

	reply.len = htobe64(len);
	data = zmalloc(len);
		PERROR("viewer metadata zmalloc");

	fd = fs_handle_get_fd(vstream->stream_file.handle);
		ERR("Failed to restore viewer stream file system handle");

	read_len = lttng_read(fd, data, len);
	fs_handle_put_fd(vstream->stream_file.handle);

	if (read_len < len) {
			PERROR("Failed to read metadata file");

			/*
			 * A clear has been performed which prevents the relay
			 * from sending `len` bytes of metadata.
			 *
			 * It is important not to send any metadata if we
			 * couldn't read all the available metadata in one shot:
			 * sending partial metadata can cause the client to
			 * attempt to parse an incomplete (incoherent) metadata
			 * stream, which would result in an error.
			 */
			const off_t seek_ret = fs_handle_seek(
					vstream->stream_file.handle, -read_len,
					SEEK_CUR);

			DBG("Failed to read metadata: requested = %" PRIu64 ", got = %zd",
					len, read_len);

				PERROR("Failed to restore metadata file position after partial read");

	vstream->metadata_sent += read_len;
	reply.status = htobe32(LTTNG_VIEWER_METADATA_OK);

	reply.status = htobe32(LTTNG_VIEWER_METADATA_ERR);

	health_code_update();

	pthread_mutex_unlock(&vstream->stream->lock);

	ret = send_response(conn->sock, &reply, sizeof(reply));

	health_code_update();

	ret = send_response(conn->sock, data, len);

	DBG("Sent %" PRIu64 " bytes of metadata for stream %" PRIu64, len,
			(uint64_t) be64toh(request.stream_id));

	DBG("Metadata sent");

	viewer_stream_put(vstream);
/*
 * Create a viewer session.
 *
 * Return 0 on success or else a negative value.
 */
int viewer_create_session(struct relay_connection *conn)

	struct lttng_viewer_create_session_response resp;

	DBG("Viewer create session received");

	memset(&resp, 0, sizeof(resp));
	resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_OK);
	conn->viewer_session = viewer_session_create();
	if (!conn->viewer_session) {
		ERR("Failed to allocate viewer session");
		resp.status = htobe32(LTTNG_VIEWER_CREATE_SESSION_ERR);

	health_code_update();
	ret = send_response(conn->sock, &resp, sizeof(resp));

	health_code_update();
/*
 * Detach a viewer session.
 *
 * Return 0 on success or else a negative value.
 */
int viewer_detach_session(struct relay_connection *conn)

	struct lttng_viewer_detach_session_response response;
	struct lttng_viewer_detach_session_request request;
	struct relay_session *session = NULL;
	uint64_t viewer_session_to_close;

	DBG("Viewer detach session received");

	health_code_update();

	/* Receive the request from the connected client. */
	ret = recv_request(conn->sock, &request, sizeof(request));

	viewer_session_to_close = be64toh(request.session_id);

	if (!conn->viewer_session) {
		DBG("Client trying to detach before creating a live viewer session");
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);

	health_code_update();

	memset(&response, 0, sizeof(response));
	DBG("Detaching from session ID %" PRIu64, viewer_session_to_close);

	session = session_get_by_id(be64toh(request.session_id));
	if (!session) {
		DBG("Relay session %" PRIu64 " not found",
				(uint64_t) be64toh(request.session_id));
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_UNK);

	ret = viewer_session_is_attached(conn->viewer_session, session);
		DBG("Not attached to this session");
		response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_ERR);
		goto send_reply_put;

	viewer_session_close_one_session(conn->viewer_session, session);
	response.status = htobe32(LTTNG_VIEWER_DETACH_SESSION_OK);
	DBG("Session %" PRIu64 " detached.", viewer_session_to_close);

	session_put(session);

	health_code_update();
	ret = send_response(conn->sock, &response, sizeof(response));

	health_code_update();
/*
 * live_relay_unknown_command: send -1 if received unknown command
 */
void live_relay_unknown_command(struct relay_connection *conn)

	struct lttcomm_relayd_generic_reply reply;

	memset(&reply, 0, sizeof(reply));
	reply.ret_code = htobe32(LTTNG_ERR_UNK);
	(void) send_response(conn->sock, &reply, sizeof(reply));
/*
 * Process the commands received on the control socket
 */
int process_control(struct lttng_viewer_cmd *recv_hdr,
		struct relay_connection *conn)

	msg_value = be32toh(recv_hdr->cmd);

	/*
	 * Make sure we've done the version check before any command other than a
	 * new client connection.
	 */
	if (msg_value != LTTNG_VIEWER_CONNECT && !conn->version_check_done) {
		ERR("Viewer conn value %" PRIu32 " before version check", msg_value);

	switch (msg_value) {
	case LTTNG_VIEWER_CONNECT:
		ret = viewer_connect(conn);
		break;
	case LTTNG_VIEWER_LIST_SESSIONS:
		ret = viewer_list_sessions(conn);
		break;
	case LTTNG_VIEWER_ATTACH_SESSION:
		ret = viewer_attach_session(conn);
		break;
	case LTTNG_VIEWER_GET_NEXT_INDEX:
		ret = viewer_get_next_index(conn);
		break;
	case LTTNG_VIEWER_GET_PACKET:
		ret = viewer_get_packet(conn);
		break;
	case LTTNG_VIEWER_GET_METADATA:
		ret = viewer_get_metadata(conn);
		break;
	case LTTNG_VIEWER_GET_NEW_STREAMS:
		ret = viewer_get_new_streams(conn);
		break;
	case LTTNG_VIEWER_CREATE_SESSION:
		ret = viewer_create_session(conn);
		break;
	case LTTNG_VIEWER_DETACH_SESSION:
		ret = viewer_detach_session(conn);
		break;
	default:
		ERR("Received unknown viewer command (%u)",
				be32toh(recv_hdr->cmd));
		live_relay_unknown_command(conn);
void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)

	(void) lttng_poll_del(events, pollfd);

	ret = fd_tracker_close_unsuspendable_fd(the_fd_tracker, &pollfd, 1,
			fd_tracker_util_close_fd, NULL);
		ERR("Closing pollfd %d", pollfd);
/*
 * This thread does the actual work
 */
void *thread_worker(void *data)

	struct lttng_poll_event events;
	struct lttng_ht *viewer_connections_ht;
	struct lttng_ht_iter iter;
	struct lttng_viewer_cmd recv_hdr;
	struct relay_connection *destroy_conn;

	DBG("[thread] Live viewer relay worker started");

	rcu_register_thread();

	health_register(health_relayd, HEALTH_RELAYD_TYPE_LIVE_WORKER);

	if (testpoint(relayd_thread_live_worker)) {
		goto error_testpoint;

	/* table of connections indexed on socket */
	viewer_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	if (!viewer_connections_ht) {
		goto viewer_connections_ht_error;

	ret = create_named_thread_poll_set(&events, 2,
			"Live viewer worker thread epoll");
		goto error_poll_create;

	ret = lttng_poll_add(&events, live_conn_pipe[0], LPOLLIN | LPOLLRDHUP);

		health_code_update();

		/* Infinite blocking call, waiting for transmission */
		DBG3("Relayd live viewer worker thread polling...");
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);

			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {

		/*
		 * Process control. The control connection is prioritised so we don't
		 * starve it with high throughput tracing data on the data
		 * connection.
		 */
		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			uint32_t revents = LTTNG_POLL_GETEV(&events, i);
			int pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			/* Inspect the relay conn pipe for new connection. */
			if (pollfd == live_conn_pipe[0]) {
				if (revents & LPOLLIN) {
					struct relay_connection *conn;

					ret = lttng_read(live_conn_pipe[0],
							&conn, sizeof(conn));

					ret = lttng_poll_add(&events,
							conn->sock->fd,
							LPOLLIN | LPOLLRDHUP);
						ERR("Failed to add new live connection file descriptor to poll set");

					connection_ht_add(viewer_connections_ht, conn);
					DBG("Connection socket %d added to poll", conn->sock->fd);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Relay live pipe error");
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
			} else {
				/* Connection activity. */
				struct relay_connection *conn;

				conn = connection_get_by_sock(viewer_connections_ht, pollfd);

				if (revents & LPOLLIN) {
					ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr,
							sizeof(recv_hdr), 0);
						/* Connection closed. */
						cleanup_connection_pollfd(&events, pollfd);
						/* Put "create" ownership reference. */
						connection_put(conn);
						DBG("Viewer control conn closed with %d", pollfd);

						ret = process_control(&recv_hdr, conn);
							/* Clear the session on error. */
							cleanup_connection_pollfd(&events, pollfd);
							/* Put "create" ownership reference. */
							connection_put(conn);
							DBG("Viewer connection closed with %d", pollfd);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					cleanup_connection_pollfd(&events, pollfd);
					/* Put "create" ownership reference. */
					connection_put(conn);
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					connection_put(conn);

				/* Put local "get_by_sock" reference. */
				connection_put(conn);
	(void) fd_tracker_util_poll_clean(the_fd_tracker, &events);

	/* Cleanup remaining connection object. */
	cds_lfht_for_each_entry(viewer_connections_ht->ht, &iter.iter,
			destroy_conn,
			sock_n.node) {
		health_code_update();
		connection_put(destroy_conn);

	lttng_ht_destroy(viewer_connections_ht);
viewer_connections_ht_error:
	/* Close relay conn pipes */
	(void) fd_tracker_util_pipe_close(the_fd_tracker, live_conn_pipe);
		DBG("Viewer worker thread exited with error");

	DBG("Viewer worker thread cleanup complete");

	ERR("Health error occurred in %s", __func__);

	health_unregister(health_relayd);
	if (lttng_relay_stop_threads()) {
		ERR("Error stopping threads");

	rcu_unregister_thread();
2488 * Closed in cleanup().
2490 static int create_conn_pipe(void)
2492 return fd_tracker_util_pipe_open_cloexec(the_fd_tracker
,
2493 "Live connection pipe", live_conn_pipe
);
int relayd_live_join(void)

	int ret, retval = 0;

	ret = pthread_join(live_listener_thread, &status);
		PERROR("pthread_join live listener");

	ret = pthread_join(live_worker_thread, &status);
		PERROR("pthread_join live worker");

	ret = pthread_join(live_dispatcher_thread, &status);
		PERROR("pthread_join live dispatcher");

	cleanup_relayd_live();
int relayd_live_create(struct lttng_uri *uri)

	int ret = 0, retval = 0;

		goto exit_init_data;

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

		if (live_uri->port < 1024) {
			ERR("Need to be root to use ports < 1024");
			goto exit_init_data;

	/* Setup the thread apps communication pipe. */
	if (create_conn_pipe()) {
		goto exit_init_data;

	/* Init relay command queue. */
	cds_wfcq_init(&viewer_conn_queue.head, &viewer_conn_queue.tail);

	/* Set up max poll set size */
	if (lttng_poll_set_max_size()) {
		goto exit_init_data;

	/* Setup the dispatcher thread */
	ret = pthread_create(&live_dispatcher_thread, default_pthread_attr(),
			thread_dispatcher, (void *) NULL);
		PERROR("pthread_create viewer dispatcher");
		goto exit_dispatcher_thread;

	/* Setup the worker thread */
	ret = pthread_create(&live_worker_thread, default_pthread_attr(),
			thread_worker, NULL);
		PERROR("pthread_create viewer worker");
		goto exit_worker_thread;

	/* Setup the listener thread */
	ret = pthread_create(&live_listener_thread, default_pthread_attr(),
			thread_listener, (void *) NULL);
		PERROR("pthread_create viewer listener");
		goto exit_listener_thread;

	/*
	 * All OK, started all threads.
	 */

	/*
	 * Join on the live_listener_thread should anything be added after
	 * the live_listener thread's creation.
	 */

exit_listener_thread:
	ret = pthread_join(live_worker_thread, &status);
		PERROR("pthread_join live worker");

	ret = pthread_join(live_dispatcher_thread, &status);
		PERROR("pthread_join live dispatcher");

exit_dispatcher_thread:

	cleanup_relayd_live();