/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "testpoint.h"
#define CONSUMERD_FILE	"lttng-consumerd"
const char default_home_dir[] = DEFAULT_HOME_DIR;
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
const char *opt_tracing_group;
static const char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;			/* Set to 1 if the daemon is running as root */
static pid_t ppid;			/* Parent PID for --sig-parent option */
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];
/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };
/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };
/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
/*
 * UST registration command queue. This queue is tied to a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration threads
 * interact with this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
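
/*
 * Illustrative sketch (not compiled): how the N-wakers / 1-waiter scheme is
 * meant to be used around this queue, assuming only the futex_nto1_* helpers
 * from futex.c/.h; 'cmd' and 'node' are placeholders. The real waker and
 * waiter are thread_registration_apps and thread_dispatch_ust_registration
 * below.
 */
#if 0
	/* Waker side (N threads): publish a node, then wake the waiter. */
	cds_wfq_enqueue(&ust_cmd_queue.queue, &cmd->node);
	futex_nto1_wake(&ust_cmd_queue.futex);

	/* Waiter side (1 thread): prepare, drain the queue, then block. */
	futex_nto1_prepare(&ust_cmd_queue.futex);
	while ((node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue)) != NULL) {
		/* ... process node ... */
	}
	futex_nto1_wait(&ust_cmd_queue.futex);
#endif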
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;
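
/*
 * Illustrative sketch (not compiled) of the locking discipline described
 * above: hold the list lock around iteration, lock each session while
 * touching it, and release in the reverse order. 'session' is a placeholder;
 * update_kernel_poll() below is a real instance of this pattern.
 */
#if 0
	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		/* ... inspect or modify the session ... */
		session_unlock(session);
	}
	session_unlock_list();
#endif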
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};
/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
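
/*
 * Illustrative sketch (not compiled) of the check-then-act validation
 * described above. The read is atomic, but the consumer can still die between
 * this check and the actual command, which is exactly the documented race:
 * after this point, delivery to the consumer is best effort only.
 */
#if 0
	if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
		/* Inform the client instead of talking to a dead consumer. */
		return LTTNG_ERR_NO_KERNCONSUMERD;
	}
	/* ... proceed with the command ... */
#endif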
/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;
static void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * A runtime environment variable overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static int create_thread_poll_set(struct lttng_poll_event *events, int size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered, else 0.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
/*
 * Return group ID of the tracing group or -1 if not found.
 */
static gid_t allowed_group(void)
{
	struct group *grp;

	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	} else {
		grp = getgrnam(default_tracing_group);
	}
	if (!grp) {
		return -1;
	} else {
		return grp->gr_gid;
	}
}
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
/*
 * Cleanup the daemon
 */
static void cleanup(void)
{
	int ret;
	char *cmd = NULL;
	struct ltt_session *sess, *stmp;

	DBG("Cleaning up");

	/* First thing first, stop all threads */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	if (opt_pidfile) {
		ret = remove(opt_pidfile);
		if (ret < 0) {
			PERROR("remove pidfile %s", opt_pidfile);
		}
	}

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
	if (ret < 0) {
		ERR("asprintf failed. Something is really wrong!");
	}

	/* Remove lttng run directory */
	ret = system(cmd);
	if (ret < 0) {
		ERR("Unable to clean %s", rundir);
	}
	free(cmd);
	free(rundir);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	/* <fun> */
	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
	/* </fun> */
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the allocated payload buffer.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
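
/*
 * Illustrative sketch (not compiled): typical use of setup_lttng_msg() by a
 * command handler returning a payload. 'payload' and 'payload_size' are
 * placeholders; see the LTTNG_LIST_* handlers in process_client_msg() for
 * real uses.
 */
#if 0
	ret = setup_lttng_msg(cmd_ctx, payload_size);
	if (ret < 0) {
		goto setup_error;
	}
	memcpy(cmd_ctx->llm->payload, payload, payload_size);
#endif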
/*
 * Update the kernel poll set of all channel fd available over all tracing
 * session. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing session. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					rcu_read_lock();
					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						/* Code flow error */
						assert(socket->fd >= 0);

						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							rcu_read_unlock();
							goto error;
						}
					}
					rcu_read_unlock();
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(HEALTH_TYPE_KERNEL);

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so zero it before the loop.
	 */
	memset(&events, 0, sizeof(events));

	if (testpoint(thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = create_thread_poll_set(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				do {
					ret = read(kernel_poll_pipe[0], &tmp, 1);
				} while (ret < 0 && errno == EINTR);
				/*
				 * Ret value is useless here, if this pipe gets any actions an
				 * update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
					/*
					 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
					 * and unregister kernel stream at this point.
					 */
				}
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister();
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal the pthread condition of the consumer data so that the thread
 * waiting on the state change is woken up.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
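
/*
 * Illustrative sketch (not compiled) of the waiter side that pairs with
 * signal_consumer_condition(). The loop guards against spurious wakeups;
 * spawn_consumer_thread() below is the real implementation, including the
 * timed wait.
 */
#if 0
	pthread_mutex_lock(&data->cond_mutex);
	while (!data->consumer_thread_is_ready) {
		pthread_cond_wait(&data->cond, &data->cond_mutex);
	}
	pthread_mutex_unlock(&data->cond_mutex);
#endif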
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_register(HEALTH_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();

	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			PERROR("consumer connect");
			goto error;
		}
		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready");
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the kconsumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	health_poll_entry();
	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart_poll;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");
				goto error;
			}
		}
	}

	health_code_update();

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		ERR("consumer closed the command socket");
		goto error;
	}

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

exit:
error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister();
	DBG("consumer thread cleanup completed");

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(HEALTH_TYPE_APP_MANAGE);

	if (testpoint(thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					/* Empty pipe */
					do {
						ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
					} while (ret < 0 && errno == EINTR);
					if (ret < 0 || ret < sizeof(ust_cmd)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * Lock the global session list so from the register up to
					 * the registration done message, no thread can see the
					 * application and change its state.
					 */
					session_lock_list();

					/* Register applicaton to the session daemon */
					ret = ust_app_register(&ust_cmd.reg_msg,
							ust_cmd.sock);
					if (ret == -ENOMEM) {
						session_unlock_list();
						goto error;
					} else if (ret < 0) {
						session_unlock_list();
						break;
					}

					health_code_update();

					/*
					 * Validate UST version compatibility.
					 */
					ret = ust_app_validate_version(ust_cmd.sock);
					if (ret >= 0) {
						/*
						 * Add channel(s) and event(s) to newly registered apps
						 * from lttng global UST domain.
						 */
						update_ust_app(ust_cmd.sock);
					}

					health_code_update();

					ret = ust_app_register_done(ust_cmd.sock);
					if (ret < 0) {
						/*
						 * If the registration is not possible, we simply
						 * unregister the apps and continue
						 */
						ust_app_unregister(ust_cmd.sock);
					} else {
						/*
						 * We only monitor the error events of the socket. This
						 * thread does not handle any incoming data from UST
						 * sockets.
						 */
						ret = lttng_poll_add(&events, ust_cmd.sock,
								LPOLLERR | LPOLLHUP | LPOLLRDHUP);
						if (ret < 0) {
							session_unlock_list();
							goto error;
						}

						/* Set socket timeout for both receiving and ending */
						(void) lttcomm_setsockopt_rcv_timeout(ust_cmd.sock,
								app_socket_timeout);
						(void) lttcomm_setsockopt_snd_timeout(ust_cmd.sock,
								app_socket_timeout);

						DBG("Apps with sock %d added to poll set",
								ust_cmd.sock);
					}
					session_unlock_list();

					health_code_update();

					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister();
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
			/*
			 * Inform apps thread of the new application registration. This
			 * call is blocking so we can be assured that the data will be read
			 * at some point in time or wait to the end of the world :)
			 */
			if (apps_cmd_pipe[1] >= 0) {
				do {
					ret = write(apps_cmd_pipe[1], ust_cmd,
							sizeof(struct ust_command));
				} while (ret < 0 && errno == EINTR);
				if (ret < 0 || ret != sizeof(struct ust_command)) {
					PERROR("write apps cmd pipe");
					if (errno == EBADF) {
						/*
						 * We can't inform the application thread to process
						 * registration. We will exit or else application
						 * registration will not occur and tracing will never
						 * start.
						 */
						goto error;
					}
				}
			} else {
				/* Application manager thread is not available. */
				ret = close(ust_cmd->sock);
				if (ret < 0) {
					PERROR("close ust_cmd sock");
				}
			}
			free(ust_cmd);
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
	}

error:
	DBG("Dispatch thread dying");
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(HEALTH_TYPE_APP_REG);

	if (testpoint(thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problems for already\n"
			"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}
					health_code_update();
					ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
							sizeof(struct ust_register_msg));
					if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						if (ret < 0) {
							PERROR("lttcomm_recv_unix_sock register apps");
						} else {
							ERR("Wrong size received on apps register");
						}
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}

	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	health_unregister();

	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	/* Handle timeout or error of the wait */
	if (ret != 0) {
		if (ret == ETIMEDOUT) {
			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("Condition timed out. The consumer thread was never ready."
					" Killing it");
			ret = pthread_cancel(consumer_data->thread);
			if (ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("pthread_cond_wait failed consumer thread");
		}
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		ret = -1;
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;
	int ret;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}

	return pthread_join(consumer_data->thread, &status);
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one,
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				break;
			}
			DBG("Using kernel consumer at: %s",  consumer_to_use);
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s",  consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s",  consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		PERROR("kernel start consumer exec");
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}

error:
	return ret;
}
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret, err;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created socket on error. */
	if (consumer_data->err_sock >= 0) {
		err = close(consumer_data->err_sock);
		if (err < 0) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
/*
 * Compute health status of each consumer. If one of them is zero (bad
 * state), we return 0.
 */
static int check_consumer_health(void)
{
	int ret;

	ret = health_check_state(HEALTH_TYPE_CONSUMER);

	DBG3("Health consumer check %d", ret);

	return ret;
}
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session);
	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_destroy_output(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_destroy_output(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
/*
 * Create an UST session and add it to the session ust list.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session);
	assert(domain);
	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->path, session->id);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
/*
 * Count number of session permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
2195 * Process the command requested by the lttng client within the command
2196 * context structure. This function make sure that the return structure (llm)
2197 * is set and ready for transmission before returning.
2199 * Return any error encountered or 0 for success.
2201 * "sock" is only used for special-case var. len data.
2203 static int process_client_msg(struct command_ctx
*cmd_ctx
, int sock
,
2207 int need_tracing_session
= 1;
2210 DBG("Processing client command %d", cmd_ctx
->lsm
->cmd_type
);
2214 switch (cmd_ctx
->lsm
->cmd_type
) {
2215 case LTTNG_CREATE_SESSION
:
2216 case LTTNG_DESTROY_SESSION
:
2217 case LTTNG_LIST_SESSIONS
:
2218 case LTTNG_LIST_DOMAINS
:
2219 case LTTNG_START_TRACE
:
2220 case LTTNG_STOP_TRACE
:
2221 case LTTNG_DATA_PENDING
:
2228 if (opt_no_kernel
&& need_domain
2229 && cmd_ctx
->lsm
->domain
.type
== LTTNG_DOMAIN_KERNEL
) {
2231 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2233 ret
= LTTNG_ERR_KERN_NA
;
2238 /* Deny register consumer if we already have a spawned consumer. */
2239 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_REGISTER_CONSUMER
) {
2240 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2241 if (kconsumer_data
.pid
> 0) {
2242 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2243 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2246 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2250 * Check for command that don't needs to allocate a returned payload. We do
2251 * this here so we don't have to make the call for no payload at each
2254 switch(cmd_ctx
->lsm
->cmd_type
) {
2255 case LTTNG_LIST_SESSIONS
:
2256 case LTTNG_LIST_TRACEPOINTS
:
2257 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2258 case LTTNG_LIST_DOMAINS
:
2259 case LTTNG_LIST_CHANNELS
:
2260 case LTTNG_LIST_EVENTS
:
2263 /* Setup lttng message with no payload */
2264 ret
= setup_lttng_msg(cmd_ctx
, 0);
2266 /* This label does not try to unlock the session */
2267 goto init_setup_error
;
2271 /* Commands that DO NOT need a session. */
2272 switch (cmd_ctx
->lsm
->cmd_type
) {
2273 case LTTNG_CREATE_SESSION
:
2274 case LTTNG_CALIBRATE
:
2275 case LTTNG_LIST_SESSIONS
:
2276 case LTTNG_LIST_TRACEPOINTS
:
2277 case LTTNG_LIST_TRACEPOINT_FIELDS
:
2278 need_tracing_session
= 0;
2281 DBG("Getting session %s by name", cmd_ctx
->lsm
->session
.name
);
2283 * We keep the session list lock across _all_ commands
2284 * for now, because the per-session lock does not
2285 * handle teardown properly.
2287 session_lock_list();
2288 cmd_ctx
->session
= session_find_by_name(cmd_ctx
->lsm
->session
.name
);
2289 if (cmd_ctx
->session
== NULL
) {
2290 if (cmd_ctx
->lsm
->session
.name
!= NULL
) {
2291 ret
= LTTNG_ERR_SESS_NOT_FOUND
;
2293 /* If no session name specified */
2294 ret
= LTTNG_ERR_SELECT_SESS
;
2298 /* Acquire lock for the session */
2299 session_lock(cmd_ctx
->session
);
2309 * Check domain type for specific "pre-action".
2311 switch (cmd_ctx
->lsm
->domain
.type
) {
2312 case LTTNG_DOMAIN_KERNEL
:
2314 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
2318 /* Kernel tracer check */
2319 if (kernel_tracer_fd
== -1) {
2320 /* Basically, load kernel tracer modules */
2321 ret
= init_kernel_tracer();
2327 /* Consumer is in an ERROR state. Report back to client */
2328 if (uatomic_read(&kernel_consumerd_state
) == CONSUMER_ERROR
) {
2329 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
2333 /* Need a session for kernel command */
2334 if (need_tracing_session
) {
2335 if (cmd_ctx
->session
->kernel_session
== NULL
) {
2336 ret
= create_kernel_session(cmd_ctx
->session
);
2338 ret
= LTTNG_ERR_KERN_SESS_FAIL
;
2343 /* Start the kernel consumer daemon */
2344 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
2345 if (kconsumer_data
.pid
== 0 &&
2346 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2347 cmd_ctx
->session
->start_consumer
) {
2348 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2349 ret
= start_consumerd(&kconsumer_data
);
2351 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
2354 uatomic_set(&kernel_consumerd_state
, CONSUMER_STARTED
);
2356 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
2360 * The consumer was just spawned so we need to add the socket to
2361 * the consumer output of the session if exist.
2363 ret
= consumer_create_socket(&kconsumer_data
,
2364 cmd_ctx
->session
->kernel_session
->consumer
);
2371 case LTTNG_DOMAIN_UST
:
2373 /* Consumer is in an ERROR state. Report back to client */
2374 if (uatomic_read(&ust_consumerd_state
) == CONSUMER_ERROR
) {
2375 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
2379 if (need_tracing_session
) {
2380 /* Create UST session if none exist. */
2381 if (cmd_ctx
->session
->ust_session
== NULL
) {
2382 ret
= create_ust_session(cmd_ctx
->session
,
2383 &cmd_ctx
->lsm
->domain
);
2384 if (ret
!= LTTNG_OK
) {
2389 /* Start the UST consumer daemons */
2391 pthread_mutex_lock(&ustconsumer64_data
.pid_mutex
);
2392 if (consumerd64_bin
[0] != '\0' &&
2393 ustconsumer64_data
.pid
== 0 &&
2394 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2395 cmd_ctx
->session
->start_consumer
) {
2396 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
2397 ret
= start_consumerd(&ustconsumer64_data
);
2399 ret
= LTTNG_ERR_UST_CONSUMER64_FAIL
;
2400 uatomic_set(&ust_consumerd64_fd
, -EINVAL
);
2404 uatomic_set(&ust_consumerd64_fd
, ustconsumer64_data
.cmd_sock
);
2405 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
2407 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
2411 * Setup socket for consumer 64 bit. No need for atomic access
2412 * since it was set above and can ONLY be set in this thread.
2414 ret
= consumer_create_socket(&ustconsumer64_data
,
2415 cmd_ctx
->session
->ust_session
->consumer
);
2421 if (consumerd32_bin
[0] != '\0' &&
2422 ustconsumer32_data
.pid
== 0 &&
2423 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
&&
2424 cmd_ctx
->session
->start_consumer
) {
2425 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
2426 ret
= start_consumerd(&ustconsumer32_data
);
2428 ret
= LTTNG_ERR_UST_CONSUMER32_FAIL
;
2429 uatomic_set(&ust_consumerd32_fd
, -EINVAL
);
2433 uatomic_set(&ust_consumerd32_fd
, ustconsumer32_data
.cmd_sock
);
2434 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
2436 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
2440 * Setup socket for consumer 64 bit. No need for atomic access
2441 * since it was set above and can ONLY be set in this thread.
2443 ret
= consumer_create_socket(&ustconsumer32_data
,
2444 cmd_ctx
->session
->ust_session
->consumer
);
2456 /* Validate consumer daemon state when start/stop trace command */
2457 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_START_TRACE
||
2458 cmd_ctx
->lsm
->cmd_type
== LTTNG_STOP_TRACE
) {
2459 switch (cmd_ctx
->lsm
->domain
.type
) {
2460 case LTTNG_DOMAIN_UST
:
2461 if (uatomic_read(&ust_consumerd_state
) != CONSUMER_STARTED
) {
2462 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
2466 case LTTNG_DOMAIN_KERNEL
:
2467 if (uatomic_read(&kernel_consumerd_state
) != CONSUMER_STARTED
) {
2468 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
2476 * Check that the UID or GID match that of the tracing session.
2477 * The root user can interact with all sessions.
2479 if (need_tracing_session
) {
2480 if (!session_access_ok(cmd_ctx
->session
,
2481 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
2482 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
))) {
2483 ret
= LTTNG_ERR_EPERM
;
2489 * Send relayd information to consumer as soon as we have a domain and a
2492 if (cmd_ctx
->session
&& need_domain
) {
2494 * Setup relayd if not done yet. If the relayd information was already
2495 * sent to the consumer, this call will gracefully return.
2497 ret
= cmd_setup_relayd(cmd_ctx
->session
);
2498 if (ret
!= LTTNG_OK
) {
2503 /* Process by command type */
2504 switch (cmd_ctx
->lsm
->cmd_type
) {
2505 case LTTNG_ADD_CONTEXT
:
2507 ret
= cmd_add_context(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2508 cmd_ctx
->lsm
->u
.context
.channel_name
,
2509 &cmd_ctx
->lsm
->u
.context
.ctx
, kernel_poll_pipe
[1]);
2512 case LTTNG_DISABLE_CHANNEL
:
2514 ret
= cmd_disable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2515 cmd_ctx
->lsm
->u
.disable
.channel_name
);
2518 case LTTNG_DISABLE_EVENT
:
2520 ret
= cmd_disable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2521 cmd_ctx
->lsm
->u
.disable
.channel_name
,
2522 cmd_ctx
->lsm
->u
.disable
.name
);
2525 case LTTNG_DISABLE_ALL_EVENT
:
2527 DBG("Disabling all events");
2529 ret
= cmd_disable_event_all(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2530 cmd_ctx
->lsm
->u
.disable
.channel_name
);
2533 case LTTNG_DISABLE_CONSUMER
:
2535 ret
= cmd_disable_consumer(cmd_ctx
->lsm
->domain
.type
, cmd_ctx
->session
);
2538 case LTTNG_ENABLE_CHANNEL
:
2540 ret
= cmd_enable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2541 &cmd_ctx
->lsm
->u
.channel
.chan
, kernel_poll_pipe
[1]);
2544 case LTTNG_ENABLE_CONSUMER
:
2547 * XXX: 0 means that this URI should be applied on the session. Should
2548 * be a DOMAIN enuam.
2550 ret
= cmd_enable_consumer(cmd_ctx
->lsm
->domain
.type
, cmd_ctx
->session
);
2551 if (ret
!= LTTNG_OK
) {
2555 if (cmd_ctx
->lsm
->domain
.type
== 0) {
2556 /* Add the URI for the UST session if a consumer is present. */
2557 if (cmd_ctx
->session
->ust_session
&&
2558 cmd_ctx
->session
->ust_session
->consumer
) {
2559 ret
= cmd_enable_consumer(LTTNG_DOMAIN_UST
, cmd_ctx
->session
);
2560 } else if (cmd_ctx
->session
->kernel_session
&&
2561 cmd_ctx
->session
->kernel_session
->consumer
) {
2562 ret
= cmd_enable_consumer(LTTNG_DOMAIN_KERNEL
,
2568 case LTTNG_ENABLE_EVENT
:
2570 ret
= cmd_enable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2571 cmd_ctx
->lsm
->u
.enable
.channel_name
,
2572 &cmd_ctx
->lsm
->u
.enable
.event
, NULL
, kernel_poll_pipe
[1]);
2575 case LTTNG_ENABLE_ALL_EVENT
:
2577 DBG("Enabling all events");
2579 ret
= cmd_enable_event_all(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
2580 cmd_ctx
->lsm
->u
.enable
.channel_name
,
2581 cmd_ctx
->lsm
->u
.enable
.event
.type
, NULL
, kernel_poll_pipe
[1]);
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the field list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx,
				sizeof(struct lttng_event_field) * nb_fields);
		if (ret < 0) {
			free(fields);
			goto setup_error;
		}

		/* Copy field list into message payload */
		memcpy(cmd_ctx->llm->payload, fields,
				sizeof(struct lttng_event_field) * nb_fields);

		free(fields);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_SESSION_FAIL;
			free(uris);
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				nb_uri, uris);
		if (ret != LTTNG_OK) {
			free(uris);
			goto error;
		}

		/*
		 * XXX: 0 means that this URI should be applied on the session. Should
		 * be a DOMAIN enum.
		 */
		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
						nb_uri, uris);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session, nb_uri, uris);
			}
		}

		free(uris);

		break;
	}
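	/*
	 * Note on the wire format used by the case above (and by
	 * LTTNG_CREATE_SESSION below): the fixed-size lttcomm_session_msg
	 * header received earlier announces the payload size in u.uri.size,
	 * and the variable-length array of struct lttng_uri then follows as a
	 * raw byte stream on the same unix socket. This two-step
	 * "header announces length, payload follows" exchange is why the
	 * lttcomm_recv_unix_sock() calls here read exactly len bytes.
	 */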
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds);

		free(uris);

		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
		if (ret < 0) {
			free(domains);
			goto setup_error;
		}

		/* Copy domain list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		int nb_chan;
		struct lttng_channel *channels;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
		if (ret < 0) {
			free(channels);
			goto setup_error;
		}

		/* Copy channel list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Fill the session array */
		cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		session_unlock_list();

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_ENABLE_EVENT_WITH_FILTER:
	{
		struct lttng_filter_bytecode *bytecode;

		if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
		if (!bytecode) {
			ret = LTTNG_ERR_FILTER_NOMEM;
			goto error;
		}

		/* Receive var. len. data */
		DBG("Receiving var len data from client ...");
		ret = lttcomm_recv_unix_sock(sock, bytecode,
				cmd_ctx->lsm->u.enable.bytecode_len);
		if (ret <= 0) {
			DBG("Nothing recv() from client var len data... continuing");
			*sock_error = 1;
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		/* The announced length must match the received header plus payload. */
		if (bytecode->len + sizeof(*bytecode)
				!= cmd_ctx->lsm->u.enable.bytecode_len) {
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		ret = cmd_data_pending(cmd_ctx->session);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}
error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}
	return ret;
}
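
/*
 * For reference, the client side of the command protocol dispatched above is
 * essentially "send one struct lttcomm_session_msg, then read back a struct
 * lttcomm_lttng_msg carrying ret_code and payload". A hedged sketch only,
 * assuming the usual lttcomm unix socket helpers; the function name is
 * hypothetical and this is not the liblttng-ctl implementation.
 */
#if 0
static int example_send_command(const char *sock_path,
		struct lttcomm_session_msg *lsm, struct lttcomm_lttng_msg *llm)
{
	int sock, ret;

	sock = lttcomm_connect_unix_sock(sock_path);
	if (sock < 0) {
		return sock;
	}

	/* Fixed-size command header first... */
	ret = lttcomm_send_unix_sock(sock, lsm, sizeof(*lsm));
	if (ret < 0) {
		goto end;
	}

	/* ...then read the fixed-size reply header carrying ret_code. */
	ret = lttcomm_recv_unix_sock(sock, llm, sizeof(*llm));
end:
	(void) close(sock);
	return ret;
}
#endif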
/*
 * Thread managing the health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		switch (msg.component) {
		case LTTNG_HEALTH_CMD:
			reply.ret_code = health_check_state(HEALTH_TYPE_CMD);
			break;
		case LTTNG_HEALTH_APP_MANAGE:
			reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE);
			break;
		case LTTNG_HEALTH_APP_REG:
			reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG);
			break;
		case LTTNG_HEALTH_KERNEL:
			reply.ret_code = health_check_state(HEALTH_TYPE_KERNEL);
			break;
		case LTTNG_HEALTH_CONSUMER:
			reply.ret_code = check_consumer_health();
			break;
		case LTTNG_HEALTH_ALL:
			reply.ret_code =
				health_check_state(HEALTH_TYPE_APP_MANAGE) &&
				health_check_state(HEALTH_TYPE_APP_REG) &&
				health_check_state(HEALTH_TYPE_CMD) &&
				health_check_state(HEALTH_TYPE_KERNEL) &&
				check_consumer_health();
			break;
		default:
			reply.ret_code = LTTNG_ERR_UND;
			break;
		}

		rcu_thread_offline();

		/*
		 * Flip ret value since 0 is a success and 1 indicates a bad health for
		 * the client where in the sessiond it is the opposite. Again, this is
		 * just to make things easier for us poor developers who enjoy a lot
		 * of pain.
		 */
		if (reply.ret_code == 0 || reply.ret_code == 1) {
			reply.ret_code = !reply.ret_code;
		}

		DBG2("Health check return value %d", reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (new_sock >= 0) {
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
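
/*
 * A hedged sketch of what a health-check client for the thread above could
 * look like: connect to health_unix_sock_path, send one lttcomm_health_msg
 * and read back the flipped ret_code (0 = good, 1 = bad, after the flip done
 * above). The helper names are the lttcomm ones used elsewhere in this file;
 * the function itself is hypothetical and other msg fields are left zeroed
 * for brevity.
 */
#if 0
static int example_query_health(const char *path, int component)
{
	int sock, ret;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	sock = lttcomm_connect_unix_sock(path);
	if (sock < 0) {
		return -1;
	}

	memset(&msg, 0, sizeof(msg));
	msg.component = component;	/* e.g. LTTNG_HEALTH_CMD */

	ret = lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
	if (ret < 0) {
		goto end;
	}

	ret = lttcomm_recv_unix_sock(sock, &reply, sizeof(reply));
	if (ret > 0) {
		ret = reply.ret_code;	/* 0 on good health, 1 on bad. */
	}
end:
	(void) close(sock);
	return ret;
}
#endif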
/*
 * This thread manages all client requests arriving on the unix client socket
 * of the session daemon.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	health_register(HEALTH_TYPE_CMD);

	if (testpoint(thread_manage_clients)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Notify parent pid that we are ready to accept command for client side.
	 */
	if (opt_sig_parent) {
		kill(ppid, SIGUSR1);
	}

	if (testpoint(thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update();

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update();

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed by the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			if (sock_error) {
				ret = close(sock);
				if (ret) {
					PERROR("close");
				}
				sock = -1;
			}
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update();
	}

exit:
error:
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_listen:
error_create_poll:
error_testpoint:
	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}

	health_unregister();

	DBG("Client thread dying");

	rcu_unregister_thread();
	return NULL;
}
/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGUSR1 to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "  -p, --pidfile FILE                 Write the PID to FILE, overriding the default location.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
}
/*
 * daemon argument parsing
 */
static int parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "client-sock", 1, 0, 'c' },
		{ "apps-sock", 1, 0, 'a' },
		{ "kconsumerd-cmd-sock", 1, 0, 'C' },
		{ "kconsumerd-err-sock", 1, 0, 'E' },
		{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
		{ "ustconsumerd32-err-sock", 1, 0, 'H' },
		{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
		{ "ustconsumerd64-err-sock", 1, 0, 'F' },
		{ "consumerd32-path", 1, 0, 'u' },
		{ "consumerd32-libdir", 1, 0, 'U' },
		{ "consumerd64-path", 1, 0, 't' },
		{ "consumerd64-libdir", 1, 0, 'T' },
		{ "daemonize", 0, 0, 'd' },
		{ "sig-parent", 0, 0, 'S' },
		{ "help", 0, 0, 'h' },
		{ "group", 1, 0, 'g' },
		{ "version", 0, 0, 'V' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "verbose-consumer", 0, 0, 'Z' },
		{ "no-kernel", 0, 0, 'N' },
		{ "pidfile", 1, 0, 'p' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:",
				long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'a':
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'g':
			opt_tracing_group = optarg;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		case 'S':
			opt_sig_parent = 1;
			break;
		case 'E':
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'C':
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'F':
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'D':
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'H':
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'G':
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'N':
			opt_no_kernel = 1;
			break;
		case 'q':
			lttng_opt_quiet = 1;
			break;
		case 'v':
			/* Verbose level can increase using multiple -v */
			lttng_opt_verbose += 1;
			break;
		case 'Z':
			opt_verbose_consumer += 1;
			break;
		case 'u':
			consumerd32_bin = optarg;
			break;
		case 'U':
			consumerd32_libdir = optarg;
			break;
		case 't':
			consumerd64_bin = optarg;
			break;
		case 'T':
			consumerd64_libdir = optarg;
			break;
		case 'p':
			opt_pidfile = optarg;
			break;
		default:
			/*
			 * Unknown option or other error.
			 * Error is printed by getopt, just return.
			 */
			return -1;
		}
	}

	return 0;
}
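
/*
 * Typical invocations of the option parsing above, for illustration only:
 *
 *   lttng-sessiond                      # foreground, default socket paths
 *   lttng-sessiond --daemonize -vvv     # daemon with maximum verbosity
 *   lttng-sessiond --sig-parent         # SIGUSR1 to parent when ready
 *   lttng-sessiond --no-kernel -p /tmp/sessiond.pid
 */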
/*
 * Create the two sockets needed by the daemon:
 *   apps_sock   - The communication socket for all UST apps.
 *   client_sock - The communication socket for the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}
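
/*
 * The utils_set_fd_cloexec() calls above presumably reduce to the standard
 * fcntl(2) pattern below; a minimal sketch only, not the actual helper from
 * common/utils.c, and the function name here is hypothetical.
 */
#if 0
static int example_set_fd_cloexec(int fd)
{
	int flags;

	flags = fcntl(fd, F_GETFD, 0);
	if (flags < 0) {
		return -1;
	}
	/* Preserve existing descriptor flags, only add FD_CLOEXEC. */
	return fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
#endif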
/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	ret = allowed_group();
	if (ret < 0) {
		WARN("No tracing group detected");
		ret = 0;
		goto end;
	}

	gid = ret;

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/* Ensure tracing group can search the run dir */
	ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

end:
	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipes.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup the sockets and directory needed by the consumerds' communication with
 * the session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			PERROR("mkdir");
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	/* Create the consumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	default:
		break;
	}
}
/*
 * Setup signal handler for:
 *   SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Set the open files limit as high as possible. This daemon can open a large
 * number of file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
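
/*
 * To confirm the limit above actually took effect (setrlimit(2) fails when an
 * unprivileged process tries to raise rlim_max), one could read it back with
 * getrlimit(2); a small sketch using the same <sys/resource.h> API, with a
 * hypothetical helper name.
 */
#if 0
static void example_check_open_files_limit(void)
{
	struct rlimit lim;

	if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
		DBG("Open files limit: soft %lu, hard %lu",
				(unsigned long) lim.rlim_cur,
				(unsigned long) lim.rlim_max);
	}
}
#endif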
/*
 * Write pidfile using the rundir and opt_pidfile.
 */
static void write_pidfile(void)
{
	int ret;
	char pidfile_path[PATH_MAX];

	if (opt_pidfile) {
		strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
	} else {
		/* Build pidfile path from rundir and opt_pidfile. */
		ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
				DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
		if (ret < 0) {
			PERROR("snprintf pidfile path");
			goto error;
		}
	}

	/*
	 * Create pid file in rundir. Return value is of no importance. The
	 * execution will continue even though we are not able to write the file.
	 */
	(void) utils_create_pid_file(getpid(), pidfile_path);

error:
	return;
}
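
/*
 * utils_create_pid_file() is used above as a fire-and-forget helper. A
 * plausible minimal equivalent, for illustration only; the real helper lives
 * in common/utils.c and may differ:
 */
#if 0
static int example_create_pid_file(pid_t pid, const char *filepath)
{
	FILE *fp;

	fp = fopen(filepath, "w");
	if (!fp) {
		return -1;
	}
	fprintf(fp, "%d\n", (int) pid);	/* One decimal PID, newline terminated. */
	return fclose(fp);
}
#endif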
/*
 * main
 */
int main(int argc, char **argv)
{
	int ret = 0;
	void *status;
	const char *home_path, *env_app_timeout;

	init_kernel_workarounds();

	rcu_register_thread();

	setup_consumerd_path();

	/* Parse arguments */
	progname = argv[0];
	if ((ret = parse_args(argc, argv)) < 0) {
		goto error;
	}

	/* Daemonize */
	if (opt_daemon) {
		int i;

		/*
		 * fork
		 * child: setsid, close FD 0, 1, 2, chdir /
		 * parent: exit (if fork is successful)
		 */
		ret = daemon(0, 0);
		if (ret < 0) {
			PERROR("daemon");
			goto error;
		}
		/*
		 * We are in the child. Make sure all other file
		 * descriptors are closed, in case we are called with
		 * more opened file descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	/* Create thread quit pipe */
	if ((ret = init_thread_quit_pipe()) < 0) {
		goto error;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);

		/* Create global run dir with root access */
		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
		}

		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
		}

		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
		}

		/* Setup kernel consumerd path */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
	} else {
		home_path = get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			ret = -EPERM;
			goto error;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			ret = -ENOMEM;
			goto error;
		}

		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
		}
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("LTTng run directory path: %s", rundir);

	/* 32 bits consumerd path setup */
	snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64 bits consumerd path setup */
	snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);

	/*
	 * See if a daemon already exists.
	 */
	if ((ret = check_existing_daemon()) < 0) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		goto error;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	ust_app_ht_alloc();

	/* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	if ((ret = set_signal_handler()) < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
			goto exit;
		}
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
		goto exit;
	}

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);

	/*
	 * Get session list pointer. This pointer MUST NOT be free'd. This list
	 * is statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	cmd_init();

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}

	write_pidfile();

	/* Create thread to manage the health check socket */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create health");
		goto exit_health;
	}

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create clients");
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create dispatch");
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create registration");
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit_apps;
	}

	/* Don't start this thread if kernel tracing is not requested nor root */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, NULL,
				thread_manage_kernel, (void *) NULL);
		if (ret != 0) {
			PERROR("pthread_create kernel");
			goto exit_kernel;
		}

		ret = pthread_join(kernel_thread, &status);
		if (ret != 0) {
			PERROR("pthread_join");
			goto error;	/* join error, exit without cleanup */
		}
	}

exit_kernel:
	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps:
	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_reg_apps:
	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_dispatch:
	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		PERROR("join_consumer");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret != 0) {
		PERROR("join_consumer ust32");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret != 0) {
		PERROR("join_consumer ust64");
		goto error;	/* join error, exit without cleanup */
	}

exit_client:
	ret = pthread_join(health_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join health thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_health:
exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();
	if (!ret) {
		exit(EXIT_SUCCESS);
	}
error:
	exit(EXIT_FAILURE);
}