/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "testpoint.h"
#include "ust-thread.h"

#define CONSUMERD_FILE	"lttng-consumerd"
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;

const char *opt_tracing_group;
static const char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;		/* Set to 1 if the daemon is running as root */
static pid_t ppid;		/* Parent PID for --sig-parent option */
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};

static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;

/*
 * UST registration command queue. This queue is tied with a futex and uses a N
 * wakers / 1 waiter implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration interact with
 * this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
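
/*
 * Rough sketch of the wake-up protocol, assuming the semantics documented in
 * futex.c: every registration thread (waker) enqueues a node and then calls
 * futex_nto1_wake(), while the single dispatch thread (waiter) calls
 * futex_nto1_prepare() before checking the queue and futex_nto1_wait() only
 * if it found nothing, so a command enqueued between the emptiness check and
 * the sleep is never lost.
 */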
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;

/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;

/* Set in main() with the current page size. */
void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
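
/*
 * Usage sketch of the runtime overrides above (the paths are only an example
 * of an alternate install prefix, not a requirement):
 *
 *   LTTNG_CONSUMERD64_BIN=/opt/lttng/lib/lttng/libexec/lttng-consumerd \
 *   LTTNG_CONSUMERD64_LIBDIR=/opt/lttng/lib \
 *   lttng-sessiond
 */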
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0;
 */
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
/*
 * Return group ID of the tracing group or -1 if not found.
 */
static gid_t allowed_group(void)
{
	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	} else {
		grp = getgrnam(default_tracing_group);
	}
}

/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
	}
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
/*
 * Close every consumer sockets.
 */
static void close_consumer_sockets(void)
{
	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
}
static void cleanup(void)
{
	struct ltt_session *sess, *stmp;

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	ret = remove(opt_pidfile);
	if (ret < 0) {
		PERROR("remove pidfile %s", opt_pidfile);
	}

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
	if (ret < 0) {
		ERR("asprintf failed. Something is really wrong!");
	}

	/* Remove lttng run directory */
	ret = system(cmd);
	if (ret < 0) {
		ERR("Unable to clean %s", rundir);
	}

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */

	return lttcomm_send_unix_sock(sock, buf, len);
}

/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if ((*cmd_ctx)->llm) {
		free((*cmd_ctx)->llm);
	}
	if ((*cmd_ctx)->lsm) {
		free((*cmd_ctx)->lsm);
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
}
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
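
	/*
	 * Layout note: the reply is a single contiguous buffer, the fixed
	 * struct lttcomm_lttng_msg header immediately followed by the
	 * command-specific payload, so llm->data_size counts only the payload
	 * while lttng_msg_size is the total number of bytes sent back to the
	 * client.
	 */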
/*
 * Update the kernel poll set of all channel fd available over all tracing
 * session. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return ret;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing session. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}
				/* Update the stream global counter */
				ksess->stream_count_global += ret;

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						/* Code flow error */
						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess,
								session->output_traces ? 1 : 0);
						pthread_mutex_unlock(socket->lock);
					}
				}
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}
}
/*
 * This thread manage event coming from the kernel.
 *
 * Features supported in this thread:
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(HEALTH_TYPE_KERNEL);

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);

			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Poll infinite value of time */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				do {
					ret = read(kernel_poll_pipe[0], &tmp, 1);
				} while (ret < 0 && errno == EINTR);
				/*
				 * Ret value is useless here, if this pipe gets any actions an
				 * update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						goto error;
					}
				}
				/*
				 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
				 * and unregister kernel stream at this point.
				 */
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);

error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	DBG("Kernel thread dying");
}
/*
 * Signal pthread condition of the consumer data that the thread.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
/*
 * This thread manage the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_register(HEALTH_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	if (testpoint(thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));

	health_code_update();

	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		/* Connect both socket, command and metadata. */
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		consumer_data->metadata_fd =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
			PERROR("consumer connect cmd socket");
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			goto error;
		}
		consumer_data->metadata_sock.fd = &consumer_data->metadata_fd;
		/* Create metadata socket lock. */
		consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
		if (consumer_data->metadata_sock.lock == NULL) {
			PERROR("zmalloc pthread mutex");
			goto error;
		}
		pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
		DBG("Consumer metadata socket ready (fd: %d)",
				consumer_data->metadata_fd);
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the consumerd error sock since we've established a connexion */
	ret = lttng_poll_del(&events, consumer_data->err_sock);

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
	while (1) {
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));
			} else if (pollfd == consumer_data->metadata_fd) {
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			} else {
				ERR("Unknown pollfd");
				goto error;
			}
		}

		health_code_update();
	}

exit:
error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		consumer_data->cmd_sock = -1;
	}
	if (*consumer_data->metadata_sock.fd >= 0) {
		ret = close(*consumer_data->metadata_sock.fd);
	}

	/* Cleanup metadata socket mutex. */
	pthread_mutex_destroy(consumer_data->metadata_sock.lock);
	free(consumer_data->metadata_sock.lock);

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister();
	DBG("consumer thread cleanup completed");
}
/*
 * This thread manage application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(HEALTH_TYPE_APP_MANAGE);

	if (testpoint(thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					do {
						ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
					} while (ret < 0 && errno == EINTR);
					if (ret < 0 || ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * We only monitor the error events of the socket. This
					 * thread does not handle any incoming data from UST
					 * (the app socket).
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);

					/*
					 * Set socket timeout for both receiving and sending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * argument.
					 */
					(void) lttcomm_setsockopt_rcv_timeout(sock,
							app_socket_timeout * 1000);
					(void) lttcomm_setsockopt_snd_timeout(sock,
							app_socket_timeout * 1000);
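
					/*
					 * For example, with app_socket_timeout set to 5 seconds,
					 * both socket timeouts above end up being 5000 msec.
					 */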
					DBG("Apps with sock %d added to poll set", sock);
				}

				health_code_update();
			} else {
				/*
				 * At this point, we know that a registered application
				 * triggered the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				}

				health_code_update();
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister();
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
}
/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * On success, return 0 else a negative value being the errno message of the
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	/* Sockets MUST be set or else this should not have been called. */

	do {
		ret = write(fd, &sock, sizeof(sock));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
	}

	/* All good. Don't send back the write positive ret value. */
	return 0;
}
/*
 * Sanitize the wait queue of the dispatch registration thread meaning removing
 * invalid nodes from it. This is to avoid memory leaks for the case the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
			}
		}
	}

	DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);

end:
	lttng_poll_clean(&events);
	return;

error_create:
	lttng_poll_clean(&events);
	ERR("Unable to sanitize wait queue");
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	health_register(HEALTH_TYPE_APP_REG_DISPATCH);

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			struct ust_app *app = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before receiving
			 * the notify socket. This is to clean the list in order to avoid
			 * memory leaks from notify socket that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(1, LTTNG_FD_APPS);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(1, LTTNG_FD_APPS);
					continue;
				}

				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);

				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						app = wait_node->app;
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
					}
				}

				/*
				 * With no application at this stage the received socket is
				 * basically useless so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(1, LTTNG_FD_APPS);
				}
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					session_unlock_list();
					/* No notify thread, stop the UST tracing. */
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app->sock);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * point.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					session_unlock_list();
					/* No apps. thread, stop the UST tracing. */
					goto error;
				}

				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
	}
	/* Normal exit, no error */

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
	}

	DBG("Dispatch thread dying");

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister();
}
/*
 * This thread manage application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Get allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(HEALTH_TYPE_APP_REG);

	if (testpoint(thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problem for already\n"
			"running applications that wishes to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						/* Close socket of the application. */
						lttng_fd_put(LTTNG_FD_APPS, 1);
					}

					health_code_update();

					ust_cmd->sock = sock;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This apps will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}

	/* Notify that the registration thread is gone */

	if (apps_sock >= 0) {
		ret = close(apps_sock);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	health_unregister();
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		PERROR("pthread_condattr_init consumer data");
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		PERROR("pthread_condattr_setclock consumer data");
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		PERROR("pthread_cond_init consumer data");
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	if (ret == ETIMEDOUT) {
		/*
		 * Call has timed out so we kill the kconsumerd_thread and return
		 * an error.
		 */
		ERR("Condition timed out. The consumer thread was never ready.");
		ret = pthread_cancel(consumer_data->thread);
		if (ret < 0) {
			PERROR("pthread_cancel consumer thread");
		}
	} else {
		PERROR("pthread_cond_wait failed consumer thread");
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
		}
	}
	return pthread_join(consumer_data->thread, &status);
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	const char *consumer_to_use;
	const char *verbosity;

	DBG("Spawning consumerd");

	if (opt_verbose_consumer) {
		verbosity = "--verbose";
	} else {
		verbosity = "--quiet";
	}

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * Find out which consumerd to execute. We will first try the
		 * 64-bit path, then the sessiond's installation directory, and
		 * fallback on the 32-bit one,
		 */
		DBG3("Looking for a kernel consumer at these locations:");
		DBG3("	1) %s", consumerd64_bin);
		DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
		DBG3("	3) %s", consumerd32_bin);
		if (stat(consumerd64_bin, &st) == 0) {
			DBG3("Found location #1");
			consumer_to_use = consumerd64_bin;
		} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
			DBG3("Found location #2");
			consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
		} else if (stat(consumerd32_bin, &st) == 0) {
			DBG3("Found location #3");
			consumer_to_use = consumerd32_bin;
		} else {
			DBG("Could not find any valid consumerd executable");
		}
		DBG("Using kernel consumer at: %s", consumer_to_use);
		execl(consumer_to_use,
				"lttng-consumerd", verbosity, "-k",
				"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
				"--consumerd-err-sock", consumer_data->err_unix_sock_path,
				NULL);
		break;
	case LTTNG_CONSUMER64_UST:
	{
		char *tmpnew = NULL;

		if (consumerd64_libdir[0] != '\0') {
			tmp = getenv("LD_LIBRARY_PATH");
			tmplen = strlen("LD_LIBRARY_PATH=")
				+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
			tmpnew = zmalloc(tmplen + 1 /* \0 */);
			strcpy(tmpnew, "LD_LIBRARY_PATH=");
			strcat(tmpnew, consumerd64_libdir);
			if (tmp[0] != '\0') {
				strcat(tmpnew, ":");
				strcat(tmpnew, tmp);
			}
			ret = putenv(tmpnew);
);
2131 ret
= execl(consumerd64_bin
, "lttng-consumerd", verbosity
, "-u",
2132 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
2133 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
2135 if (consumerd64_libdir
[0] != '\0') {
2143 case LTTNG_CONSUMER32_UST
:
2145 char *tmpnew
= NULL
;
2147 if (consumerd32_libdir
[0] != '\0') {
2151 tmp
= getenv("LD_LIBRARY_PATH");
2155 tmplen
= strlen("LD_LIBRARY_PATH=")
2156 + strlen(consumerd32_libdir
) + 1 /* : */ + strlen(tmp
);
2157 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
2162 strcpy(tmpnew
, "LD_LIBRARY_PATH=");
2163 strcat(tmpnew
, consumerd32_libdir
);
2164 if (tmp
[0] != '\0') {
2165 strcat(tmpnew
, ":");
2166 strcat(tmpnew
, tmp
);
2168 ret
= putenv(tmpnew
);
2175 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin
);
2176 ret
= execl(consumerd32_bin
, "lttng-consumerd", verbosity
, "-u",
2177 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
2178 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
2180 if (consumerd32_libdir
[0] != '\0') {
2189 PERROR("unknown consumer type");
2193 PERROR("kernel start consumer exec");
2196 } else if (pid
> 0) {
2199 PERROR("start consumer fork");
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

error:
	/* Cleanup already created sockets on error. */
	if (consumer_data->err_sock >= 0) {
		err = close(consumer_data->err_sock);
		if (err) {
			PERROR("close consumer data error socket");
		}
	}

/*
 * Compute health status of each consumer. If one of them is zero (bad
 * state), we return 0.
 */
static int check_consumer_health(void)
{
	ret = health_check_state(HEALTH_TYPE_CONSUMER);

	DBG3("Health consumer check %d", ret);
}
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		goto error;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();

	DBG("Kernel tracer fd %d", kernel_tracer_fd);

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

	ret = close(kernel_tracer_fd);
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_destroy_output(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_destroy_output(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);
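
	/*
	 * For example, with the kernel domain this typically appends "/kernel"
	 * (DEFAULT_KERNEL_TRACE_DIR) to the session's existing subdir; the UST
	 * domain appends the UST trace directory instead.
	 */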
/*
 * Create an UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	struct ltt_ust_session *lus = NULL;

	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->id);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	lus->output_traces = session->output_traces;
	lus->snapshot_mode = session->snapshot_mode;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

error:
	session->ust_session = NULL;
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;
	session->kernel_session->output_traces = session->output_traces;
	session->kernel_session->snapshot_mode = session->snapshot_mode;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
/*
 * Count number of session permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
	}
}
2530 * Process the command requested by the lttng client within the command
2531 * context structure. This function make sure that the return structure (llm)
2532 * is set and ready for transmission before returning.
2534 * Return any error encountered or 0 for success.
2536 * "sock" is only used for special-case var. len data.
2538 * Should *NOT* be called with RCU read-side lock held.
2540 static int process_client_msg(struct command_ctx
*cmd_ctx
, int sock
,
2544 int need_tracing_session
= 1;
2547 DBG("Processing client command %d", cmd_ctx
->lsm
->cmd_type
);
2551 switch (cmd_ctx
->lsm
->cmd_type
) {
2552 case LTTNG_CREATE_SESSION
:
2553 case LTTNG_CREATE_SESSION_SNAPSHOT
:
2554 case LTTNG_DESTROY_SESSION
:
2555 case LTTNG_LIST_SESSIONS
:
2556 case LTTNG_LIST_DOMAINS
:
2557 case LTTNG_START_TRACE
:
2558 case LTTNG_STOP_TRACE
:
2559 case LTTNG_DATA_PENDING
:
2560 case LTTNG_SNAPSHOT_ADD_OUTPUT
:
2561 case LTTNG_SNAPSHOT_DEL_OUTPUT
:
2562 case LTTNG_SNAPSHOT_LIST_OUTPUT
:
2563 case LTTNG_SNAPSHOT_RECORD
:
2570 if (opt_no_kernel
&& need_domain
2571 && cmd_ctx
->lsm
->domain
.type
== LTTNG_DOMAIN_KERNEL
) {
2573 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2575 ret
= LTTNG_ERR_KERN_NA
;
2580 /* Deny register consumer if we already have a spawned consumer. */
2581 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_REGISTER_CONSUMER
) {
2582 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2583 if (kconsumer_data
.pid
> 0) {
2584 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2585 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2588 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2592 * Check for command that don't needs to allocate a returned payload. We do
2593 * this here so we don't have to make the call for no payload at each
2596 switch(cmd_ctx
->lsm
->cmd_type
) {
2597 case LTTNG_LIST_SESSIONS
:
2598 case LTTNG_LIST_TRACEPOINTS
:
2599 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2600 case LTTNG_LIST_DOMAINS
:
2601 case LTTNG_LIST_CHANNELS
:
2602 case LTTNG_LIST_EVENTS
:
2605 /* Setup lttng message with no payload */
2606 ret
= setup_lttng_msg(cmd_ctx
, 0);
2608 /* This label does not try to unlock the session */
2609 goto init_setup_error
;
	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CALIBRATE:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		/*
		 * We keep the session list lock across _all_ commands
		 * for now, because the per-session lock does not
		 * handle teardown properly.
		 */
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		if (cmd_ctx->session == NULL) {
			ret = LTTNG_ERR_SESS_NOT_FOUND;
			goto error;
		} else {
			/* Acquire lock for the session */
			session_lock(cmd_ctx->session);
		}
		break;
	}
	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == -1) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_KERNCONSUMERD;
			goto error;
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
					goto error;
				}
				uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}

			/*
			 * The consumer was just spawned, so we need to add the socket to
			 * the consumer output of the session if one exists.
			 */
			ret = consumer_create_socket(&kconsumer_data,
					cmd_ctx->session->kernel_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	case LTTNG_DOMAIN_UST:
	{
		if (!ust_app_supported()) {
			ret = LTTNG_ERR_NO_UST;
			goto error;
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_USTCONSUMERD;
			goto error;
		}

		if (need_tracing_session) {
			/* Create UST session if none exist. */
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTNG_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (consumerd64_bin[0] != '\0' &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
					uatomic_set(&ust_consumerd64_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 64 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer64_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (consumerd32_bin[0] != '\0' &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
					uatomic_set(&ust_consumerd32_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 32 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer32_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		break;
	}
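	/*
	 * Summary of the UST pre-action above: at most two consumer daemons can
	 * be spawned (one 64-bit, one 32-bit), each guarded by its own pid_mutex,
	 * and a command socket is created for whichever ones are configured so
	 * the session's consumer output can reach them.
	 */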
	/* Validate consumer daemon state when start/stop trace command */
	if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
			cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_UST:
			if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_USTCONSUMERD;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_KERNEL:
			if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_KERNCONSUMERD;
				goto error;
			}
			break;
		}
	}
	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
			ret = LTTNG_ERR_EPERM;
			goto error;
		}
	}

	/*
	 * Send relayd information to consumer as soon as we have a domain and a
	 * session defined.
	 */
	if (cmd_ctx->session && need_domain) {
		/*
		 * Setup relayd if not done yet. If the relayd information was already
		 * sent to the consumer, this call will gracefully return.
		 */
		ret = cmd_setup_relayd(cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}
	/* Process by command type */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_ADD_CONTEXT:
	{
		ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.context.channel_name,
				&cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DISABLE_CHANNEL:
	{
		ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_EVENT:
	{
		ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name,
				cmd_ctx->lsm->u.disable.name);
		break;
	}
	case LTTNG_DISABLE_ALL_EVENT:
	{
		DBG("Disabling all events");

		ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_ENABLE_CHANNEL:
	{
		ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
				&cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_ALL_EVENT:
	{
		DBG("Enabling all events");

		ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the field list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx,
				sizeof(struct lttng_event_field) * nb_fields);
		if (ret < 0) {
			free(fields);
			goto setup_error;
		}

		/* Copy field list into message payload */
		memcpy(cmd_ctx->llm->payload, fields,
				sizeof(struct lttng_event_field) * nb_fields);

		free(fields);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_SESSION_FAIL;
			free(uris);
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				nb_uri, uris);
		if (ret != LTTNG_OK) {
			free(uris);
			goto error;
		}

		/*
		 * XXX: 0 means that this URI should be applied on the session. Should
		 * be a DOMAIN enum.
		 */
		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
						nb_uri, uris);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session, nb_uri, uris);
			}
		}

		free(uris);
		break;
	}
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds);

		free(uris);
		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
		if (ret < 0) {
			free(domains);
			goto setup_error;
		}

		/* Copy domain list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		int nb_chan;
		struct lttng_channel *channels;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
		if (ret < 0) {
			free(channels);
			goto setup_error;
		}

		/* Copy channel list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Fill the session array */
		cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		session_unlock_list();

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_ENABLE_EVENT_WITH_FILTER:
	{
		struct lttng_filter_bytecode *bytecode;

		if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
		if (bytecode == NULL) {
			ret = LTTNG_ERR_FILTER_NOMEM;
			goto error;
		}

		/* Receive var. len. data */
		DBG("Receiving var len data from client ...");
		ret = lttcomm_recv_unix_sock(sock, bytecode,
				cmd_ctx->lsm->u.enable.bytecode_len);
		if (ret <= 0) {
			DBG("Nothing recv() from client var len data... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (bytecode->len + sizeof(*bytecode)
				!= cmd_ctx->lsm->u.enable.bytecode_len) {
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		ret = cmd_data_pending(cmd_ctx->session);
		break;
	}
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	{
		struct lttcomm_lttng_output_id reply;

		ret = cmd_snapshot_add_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
		if (ret != LTTNG_OK) {
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy output list into message payload */
		memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	{
		ret = cmd_snapshot_del_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output);
		break;
	}
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	{
		ssize_t nb_output;
		struct lttng_snapshot_output *outputs = NULL;

		nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
		if (nb_output < 0) {
			ret = -nb_output;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx,
				nb_output * sizeof(struct lttng_snapshot_output));
		if (ret < 0) {
			free(outputs);
			goto setup_error;
		}

		/* Copy output list into message payload */
		memcpy(cmd_ctx->llm->payload, outputs,
				nb_output * sizeof(struct lttng_snapshot_output));

		free(outputs);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_RECORD:
	{
		ret = cmd_snapshot_record(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_record.output,
				cmd_ctx->lsm->u.snapshot_record.wait);
		break;
	}
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds);

		free(uris);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}
error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}

init_setup_error:
	return ret;
}
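/*
 * Note on the exchange handled above (summary of the code, not new behaviour):
 * the client sends one struct lttcomm_session_msg (lsm) describing the command,
 * optionally followed by variable-length data (URIs, filter bytecode) read
 * directly from "sock". The reply is the lttcomm_lttng_msg (llm) built by
 * setup_lttng_msg(); its ret_code carries an lttng_error_code and its payload
 * carries command-specific arrays such as struct lttng_event entries.
 */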
/*
 * Thread managing the health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		switch (msg.component) {
		case LTTNG_HEALTH_CMD:
			reply.ret_code = health_check_state(HEALTH_TYPE_CMD);
			break;
		case LTTNG_HEALTH_APP_MANAGE:
			reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE);
			break;
		case LTTNG_HEALTH_APP_REG:
			reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG);
			break;
		case LTTNG_HEALTH_KERNEL:
			reply.ret_code = health_check_state(HEALTH_TYPE_KERNEL);
			break;
		case LTTNG_HEALTH_CONSUMER:
			reply.ret_code = check_consumer_health();
			break;
		case LTTNG_HEALTH_HT_CLEANUP:
			reply.ret_code = health_check_state(HEALTH_TYPE_HT_CLEANUP);
			break;
		case LTTNG_HEALTH_APP_MANAGE_NOTIFY:
			reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY);
			break;
		case LTTNG_HEALTH_APP_REG_DISPATCH:
			reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
			break;
		case LTTNG_HEALTH_ALL:
			reply.ret_code =
				health_check_state(HEALTH_TYPE_APP_MANAGE) &&
				health_check_state(HEALTH_TYPE_APP_REG) &&
				health_check_state(HEALTH_TYPE_CMD) &&
				health_check_state(HEALTH_TYPE_KERNEL) &&
				check_consumer_health() &&
				health_check_state(HEALTH_TYPE_HT_CLEANUP) &&
				health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY) &&
				health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
			break;
		default:
			reply.ret_code = LTTNG_ERR_UND;
			break;
		}

		/*
		 * Flip the ret value since 0 is a success and 1 indicates a bad health
		 * for the client, whereas in the sessiond it is the opposite. This is
		 * just to make things easier for us poor developers.
		 */
		if (reply.ret_code == 0 || reply.ret_code == 1) {
			reply.ret_code = !reply.ret_code;
		}

		DBG2("Health check return value %d", reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
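/*
 * Illustrative sketch (not part of the daemon): roughly how a client is
 * expected to talk to the health socket served above. The "fd" variable and
 * the connect helper are hypothetical; the message structures and the
 * "0 means healthy" convention after the flip come from the code above.
 *
 *	struct lttcomm_health_msg msg = { .component = LTTNG_HEALTH_CMD };
 *	struct lttcomm_health_data reply;
 *	int fd = connect_to_unix_sock(health_unix_sock_path);	// hypothetical helper
 *	send(fd, &msg, sizeof(msg), 0);
 *	recv(fd, &reply, sizeof(reply), 0);
 *	if (reply.ret_code == 0) {
 *		// the queried component is healthy
 *	}
 *	close(fd);
 */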
/*
 * This thread manages all client requests on the client unix socket used for
 * communication with the lttng command line tool.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	health_register(HEALTH_TYPE_CMD);

	if (testpoint(thread_manage_clients)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Notify parent pid that we are ready to accept command for client side.
	 */
	if (opt_sig_parent) {
		kill(ppid, SIGUSR1);
	}

	if (testpoint(thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update();

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update();

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed by the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update();
	}

exit:
error:
error_listen:
error_create_poll:
error_testpoint:
	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}

	health_unregister();

	DBG("Client thread dying");

	rcu_unregister_thread();
	return NULL;
}
/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGCHLD to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "  -p, --pidfile FILE                 Write a pid to FILE name overriding the default value.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
}
/*
 * daemon argument parsing
 */
static int parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "client-sock", 1, 0, 'c' },
		{ "apps-sock", 1, 0, 'a' },
		{ "kconsumerd-cmd-sock", 1, 0, 'C' },
		{ "kconsumerd-err-sock", 1, 0, 'E' },
		{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
		{ "ustconsumerd32-err-sock", 1, 0, 'H' },
		{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
		{ "ustconsumerd64-err-sock", 1, 0, 'F' },
		{ "consumerd32-path", 1, 0, 'u' },
		{ "consumerd32-libdir", 1, 0, 'U' },
		{ "consumerd64-path", 1, 0, 't' },
		{ "consumerd64-libdir", 1, 0, 'T' },
		{ "daemonize", 0, 0, 'd' },
		{ "sig-parent", 0, 0, 'S' },
		{ "help", 0, 0, 'h' },
		{ "group", 1, 0, 'g' },
		{ "version", 0, 0, 'V' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "verbose-consumer", 0, 0, 'Z' },
		{ "no-kernel", 0, 0, 'N' },
		{ "pidfile", 1, 0, 'p' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:",
				long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'a':
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'g':
			opt_tracing_group = optarg;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		case 'S':
			opt_sig_parent = 1;
			break;
		case 'E':
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'C':
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'F':
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'D':
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'H':
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'G':
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'N':
			opt_no_kernel = 1;
			break;
		case 'q':
			lttng_opt_quiet = 1;
			break;
		case 'v':
			/* Verbose level can increase using multiple -v */
			lttng_opt_verbose += 1;
			break;
		case 'Z':
			opt_verbose_consumer += 1;
			break;
		case 'u':
			consumerd32_bin = optarg;
			break;
		case 'U':
			consumerd32_libdir = optarg;
			break;
		case 't':
			consumerd64_bin = optarg;
			break;
		case 'T':
			consumerd64_libdir = optarg;
			break;
		case 'p':
			opt_pidfile = optarg;
			break;
		default:
			/* Unknown option or other error.
			 * Error is printed by getopt, just return */
			return -1;
		}
	}

	return 0;
}
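/*
 * Typical invocations handled by the option table above (illustrative only;
 * the socket and pidfile paths are made-up examples):
 *
 *	lttng-sessiond --daemonize --group tracing
 *	lttng-sessiond -vvv --verbose-consumer --no-kernel
 *	lttng-sessiond --client-sock /tmp/test-client.sock --pidfile /tmp/test.pid
 */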
/*
 * Create the two sockets needed by the daemon.
 * apps_sock - The communication socket for all UST apps.
 * client_sock - The communication socket of the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, an error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	ret = allowed_group();
	if (ret < 0) {
		WARN("No tracing group detected");
		/* Setting gid to 0 if no tracing group is found */
		gid = 0;
	} else {
		gid = ret;
	}

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
	}

	/* Ensure all applications and tracing group can search the run dir */
	ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
	}

	DBG("All permissions are set");

	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the consumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			PERROR("mkdir");
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	/* Create the consumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	ret = utils_set_fd_cloexec(consumer_data->err_sock);
	if (ret < 0) {
		PERROR("utils_set_fd_cloexec");
		/* continue anyway */
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		goto error;
	}

error:
	return ret;
}
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	default:
		break;
	}
}
/*
 * Setup signal handler for:
 *      SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Raise the open files limit. This daemon can open a large number of file
 * descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
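/*
 * Minimal sketch (not used by the daemon itself) of how the applied limit
 * could be verified with the standard getrlimit() call:
 *
 *	struct rlimit cur;
 *	if (getrlimit(RLIMIT_NOFILE, &cur) == 0) {
 *		DBG("open files limit: soft %llu hard %llu",
 *				(unsigned long long) cur.rlim_cur,
 *				(unsigned long long) cur.rlim_max);
 *	}
 */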
/*
 * Write the pidfile using the rundir and opt_pidfile.
 */
static void write_pidfile(void)
{
	int ret;
	char pidfile_path[PATH_MAX];

	if (opt_pidfile) {
		strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
	} else {
		/* Build pidfile path from rundir and the default pidfile name. */
		ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
				DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
		if (ret < 0) {
			PERROR("snprintf pidfile path");
			goto error;
		}
	}

	/*
	 * Create pid file in rundir. Return value is of no importance. The
	 * execution will continue even though we are not able to write the file.
	 */
	(void) utils_create_pid_file(getpid(), pidfile_path);

error:
	return;
}
/*
 * main
 */
int main(int argc, char **argv)
{
	int ret = 0;
	void *status;
	const char *home_path, *env_app_timeout;

	init_kernel_workarounds();

	rcu_register_thread();

	setup_consumerd_path();

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0) {
		PERROR("sysconf _SC_PAGESIZE");
		page_size = LONG_MAX;
		WARN("Fallback page size to %ld", page_size);
	}

	/* Parse arguments */
	if ((ret = parse_args(argc, argv)) < 0) {
		goto error;
	}

	/* Daemonize */
	if (opt_daemon) {
		int i;

		/*
		 * fork
		 * child: setsid, close FD 0, 1, 2, chdir /
		 * parent: exit (if fork is successful)
		 */
		ret = daemon(0, 0);
		if (ret < 0) {
			PERROR("daemon");
			goto error;
		}
		/*
		 * We are in the child. Make sure all other file
		 * descriptors are closed, in case we are called with
		 * more opened file descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	/* Create thread quit pipe */
	if ((ret = init_thread_quit_pipe()) < 0) {
		goto error;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);

		/* Create global run dir with root access */
		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
		}

		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
		}

		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
		}

		/* Setup kernel consumerd path */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
	} else {
		home_path = utils_get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			ret = -EPERM;
			goto error;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			ret = -ENOMEM;
			goto error;
		}

		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
		}
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("Application wait path %s", wait_shm_path);
	DBG("LTTng run directory path: %s", rundir);

	/* 32 bits consumerd path setup */
	snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64 bits consumerd path setup */
	snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);

	/*
	 * See if daemon already exist.
	 */
	if ((ret = check_existing_daemon()) < 0) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		goto error;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	ust_app_ht_alloc();

	/* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	if ((ret = set_signal_handler()) < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
			goto exit;
		}
	}

	/* Setup the thread ht_cleanup communication pipe. */
	if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
		goto exit;
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
		goto exit;
	}

	/* Setup the thread apps notify communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
		goto exit;
	}

	/* Initialize global buffer per UID and PID registry. */
	buffer_reg_init_uid_registry();
	buffer_reg_init_pid_registry();

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);

	/*
	 * Get session list pointer. This pointer MUST NOT be free(). This list is
	 * statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}

	write_pidfile();

	/* Initialize communication library */
	lttcomm_init();

	/* Create thread to clean up RCU hash tables */
	ret = pthread_create(&ht_cleanup_thread, NULL,
			thread_ht_cleanup, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create ht_cleanup");
		goto exit_ht_cleanup;
	}

	/* Create thread to manage the health check socket */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create health");
		goto exit_health;
	}

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create clients");
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create dispatch");
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create registration");
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit_apps;
	}

	/* Create thread to manage application notify socket */
	ret = pthread_create(&apps_notify_thread, NULL,
			ust_thread_manage_notify, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit_apps;
	}

	/* Don't start this thread if kernel tracing is not requested nor root */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, NULL,
				thread_manage_kernel, (void *) NULL);
		if (ret != 0) {
			PERROR("pthread_create kernel");
			goto exit_kernel;
		}

		ret = pthread_join(kernel_thread, &status);
		if (ret != 0) {
			PERROR("pthread_join");
			goto error;	/* join error, exit without cleanup */
		}
	}

exit_kernel:
	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps:
	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_reg_apps:
	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_dispatch:
	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		PERROR("join_consumer");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret != 0) {
		PERROR("join_consumer ust32");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret != 0) {
		PERROR("join_consumer ust64");
		goto error;	/* join error, exit without cleanup */
	}

exit_client:
	ret = pthread_join(health_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join health thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_health:
	ret = pthread_join(ht_cleanup_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join ht cleanup thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_ht_cleanup:
exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();