/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/compat/getenv.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/session-config.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "agent-thread.h"
#include "load-session-thread.h"
#include "notification-thread.h"
#include "notification-thread-commands.h"
#include "ht-cleanup.h"
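
/*
 * Name of the consumer daemon executable that the session daemon spawns;
 * see spawn_consumerd() below.
 */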
#define CONSUMERD_FILE	"lttng-consumerd"
static const char *help_msg =
#ifdef LTTNG_EMBED_HELP
#include <lttng-sessiond.8.h>
#else
NULL
#endif
;
const char *tracing_group_name = DEFAULT_TRACING_GROUP;
static int tracing_group_name_override;
static char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon, opt_background;
static int opt_no_kernel;
static char *opt_load_session_path;
static pid_t ppid;		/* Parent PID for --sig-parent option */
static pid_t child_ppid;	/* Internal parent PID used with daemonize. */
static char *rundir;

static int lockfile_fd = -1;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", required_argument, 0, 'c' },
	{ "apps-sock", required_argument, 0, 'a' },
	{ "kconsumerd-cmd-sock", required_argument, 0, '\0' },
	{ "kconsumerd-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-err-sock", required_argument, 0, '\0' },
	{ "consumerd32-path", required_argument, 0, '\0' },
	{ "consumerd32-libdir", required_argument, 0, '\0' },
	{ "consumerd64-path", required_argument, 0, '\0' },
	{ "consumerd64-libdir", required_argument, 0, '\0' },
	{ "daemonize", no_argument, 0, 'd' },
	{ "background", no_argument, 0, 'b' },
	{ "sig-parent", no_argument, 0, 'S' },
	{ "help", no_argument, 0, 'h' },
	{ "group", required_argument, 0, 'g' },
	{ "version", no_argument, 0, 'V' },
	{ "quiet", no_argument, 0, 'q' },
	{ "verbose", no_argument, 0, 'v' },
	{ "verbose-consumer", no_argument, 0, '\0' },
	{ "no-kernel", no_argument, 0, '\0' },
	{ "pidfile", required_argument, 0, 'p' },
	{ "agent-tcp-port", required_argument, 0, '\0' },
	{ "config", required_argument, 0, 'f' },
	{ "load", required_argument, 0, 'l' },
	{ "kmod-probes", required_argument, 0, '\0' },
	{ "extra-kmod-probes", required_argument, 0, '\0' },
	{ NULL, 0, 0, 0 }
};
/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };
/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };
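
/*
 * Every file descriptor and pipe end above uses -1 as its "unset" value;
 * the cleanup paths below only close descriptors that are >= 0.
 */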
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t agent_reg_thread;
static pthread_t load_session_thread;
static pthread_t notification_thread;
/*
 * UST registration command queue. This queue is tied with a futex and uses a
 * N wakers / 1 waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * use this queue along with the wait/wake scheme. The thread_manage_apps
 * receives down the line new application sockets and monitors them for any
 * I/O error or clean close that triggers an unregistration of the
 * application.
 */
static struct ust_cmd_queue ust_cmd_queue;
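
/*
 * A minimal sketch of the wait/wake pairing on this queue, as used by the
 * threads below (see futex.c for the implementation):
 *
 *   dispatch thread (1 waiter)                 registration thread (N wakers)
 *   futex_nto1_prepare(&ust_cmd_queue.futex);
 *   ... drain queue (cds_wfcq_dequeue) ...     cds_wfcq_enqueue(...);
 *   futex_nto1_wait(&ust_cmd_queue.futex);     futex_nto1_wake(&ust_cmd_queue.futex);
 */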
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
static int consumerd32_bin_override;
static int consumerd64_bin_override;
static int consumerd32_libdir_override;
static int consumerd64_libdir_override;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;
/* Set in main() with the current page size. */
long page_size;
/* Application health monitoring */
struct health_app *health_sessiond;

/* Agent TCP port for registration. Used by the agent thread. */
unsigned int agent_tcp_port = DEFAULT_AGENT_TCP_PORT;

/* Am I root or not. */
int is_root;			/* Set to 1 if the daemon is running as root */

const char * const config_section_name = "sessiond";

/* Load session thread information to operate. */
struct load_session_thread_data *load_info;

/* Notification thread handle. */
struct notification_thread_handle *notification_thread_handle;

/* Global hash tables */
struct lttng_ht *agent_apps_ht_by_sock = NULL;

/*
 * Whether sessiond is ready for commands/notification channel/health check
 * requests.
 * NR_LTTNG_SESSIOND_READY must match the number of calls to
 * sessiond_notify_ready().
 */
#define NR_LTTNG_SESSIOND_READY		4
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
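
/*
 * Return 1 if the given FD/events pair corresponds to a readable event on
 * the read end of the thread quit pipe, else 0.
 */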
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	return (fd == thread_quit_pipe[0] && (events & LPOLLIN)) ? 1 : 0;
}
/* Notify parents that we are ready for cmd and health check */
LTTNG_HIDDEN
void sessiond_notify_ready(void)
{
	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
		/*
		 * Notify parent pid that we are ready to accept command
		 * for client side. This ppid is the one from the
		 * external process that spawned us.
		 */
		if (opt_sig_parent) {
			kill(ppid, SIGUSR1);
		}

		/*
		 * Notify the parent of the fork() process that we are
		 * ready.
		 */
		if (opt_daemon || opt_background) {
			kill(child_ppid, SIGUSR1);
		}
	}
}
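
/*
 * Resolve the consumerd binary and library paths: default to the install
 * prefix for the native bitness, then let the LTTNG_CONSUMERD*_BIN and
 * LTTNG_CONSUMERD*_LIBDIR environment variables override the build defaults.
 */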
static void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = lttng_secure_getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = lttng_secure_getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = lttng_secure_getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = lttng_secure_getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
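
/*
 * Create a poll set with O_CLOEXEC and add the read end of the given quit
 * pipe to the set.
 */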
static int __sessiond_set_thread_pollset(struct lttng_poll_event *events,
		size_t size, int *a_pipe)
{
	int ret;

	assert(events);

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, a_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	return __sessiond_set_thread_pollset(events, size, thread_quit_pipe);
}
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int __init_thread_quit_pipe(int *a_pipe)
{
	int ret, i;

	ret = pipe(a_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(a_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}

static int init_thread_quit_pipe(void)
{
	return __init_thread_quit_pipe(thread_quit_pipe);
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
/*
 * Close every consumer socket.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
	if (kconsumer_data.channel_monitor_pipe >= 0) {
		ret = close(kconsumer_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("kernel consumer channel monitor pipe close");
		}
	}
	if (ustconsumer32_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer32_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd32 channel monitor pipe close");
		}
	}
	if (ustconsumer64_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer64_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd64 channel monitor pipe close");
		}
	}
}
/*
 * Generate the full lock file path using the rundir.
 *
 * Return the snprintf() return value thus a negative value is an error.
 */
static int generate_lock_file_path(char *path, size_t len)
{
	int ret;

	assert(path);
	assert(rundir);

	/* Build lockfile path from rundir. */
	ret = snprintf(path, len, "%s/" DEFAULT_LTTNG_SESSIOND_LOCKFILE, rundir);
	if (ret < 0) {
		PERROR("snprintf lockfile path");
	}

	return ret;
}
/*
 * Wait on consumer process termination.
 *
 * Need to be called with the consumer data lock held or from a context
 * ensuring no concurrent access to data (e.g: cleanup).
 */
static void wait_consumer(struct consumer_data *consumer_data)
{
	pid_t ret;
	int status;

	if (consumer_data->pid <= 0) {
		goto end;
	}

	DBG("Waiting for complete teardown of consumerd (PID: %d)",
			consumer_data->pid);
	ret = waitpid(consumer_data->pid, &status, 0);
	if (ret == -1) {
		PERROR("consumerd waitpid pid: %d", consumer_data->pid);
	} else if (!WIFEXITED(status)) {
		ERR("consumerd termination with error: %d",
				WEXITSTATUS(status));
	}
	consumer_data->pid = 0;
end:
	return;
}
/*
 * Cleanup the session daemon's data structures.
 */
static void sessiond_cleanup(void)
{
	int ret;
	struct ltt_session *sess, *stmp;
	char path[PATH_MAX];

	DBG("Cleanup sessiond");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	if (opt_pidfile) {
		ret = remove(opt_pidfile);
		if (ret < 0) {
			PERROR("remove pidfile %s", opt_pidfile);
		}
	}

	DBG("Removing sessiond and consumerd content of directory %s", rundir);

	/* sessiond */
	snprintf(path, PATH_MAX,
		"%s/%s",
		rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX, "%s/%s", rundir,
			DEFAULT_LTTNG_SESSIOND_AGENTPORT_FILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	/* kconsumerd */
	snprintf(path, PATH_MAX,
		DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
		rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
		DEFAULT_KCONSUMERD_PATH,
		rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 32 */
	snprintf(path, PATH_MAX,
		DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
		rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
		DEFAULT_USTCONSUMERD32_PATH,
		rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 64 */
	snprintf(path, PATH_MAX,
		DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
		rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
		DEFAULT_USTCONSUMERD64_PATH,
		rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	wait_consumer(&kconsumer_data);
	wait_consumer(&ustconsumer64_data);
	wait_consumer(&ustconsumer32_data);

	DBG("Cleaning up all agent apps");
	agent_app_ht_clean();

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	if (load_info) {
		load_session_destroy_data(load_info);
		free(load_info);
	}

	/*
	 * Cleanup lock file by deleting it and finally closing it which will
	 * release the file system lock.
	 */
	if (lockfile_fd >= 0) {
		char lockfile_path[PATH_MAX];

		ret = generate_lock_file_path(lockfile_path,
				sizeof(lockfile_path));
		if (ret > 0) {
			ret = remove(lockfile_path);
			if (ret < 0) {
				PERROR("remove lock file");
			}
			ret = close(lockfile_fd);
			if (ret < 0) {
				PERROR("close lock file");
			}
		}
	}

	/*
	 * We do NOT rmdir rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */
	free(rundir);
}
/*
 * Cleanup the daemon's option data structures.
 */
static void sessiond_cleanup_options(void)
{
	DBG("Cleaning up options");

	/*
	 * If the override option is set, the pointer points to a *non* const
	 * string, so it is freed even though the variable type is declared
	 * const.
	 */
	if (tracing_group_name_override) {
		free((void *) tracing_group_name);
	}
	if (consumerd32_bin_override) {
		free((void *) consumerd32_bin);
	}
	if (consumerd64_bin_override) {
		free((void *) consumerd64_bin);
	}
	if (consumerd32_libdir_override) {
		free((void *) consumerd32_libdir);
	}
	if (consumerd64_libdir_override) {
		free((void *) consumerd64_libdir);
	}

	free(opt_pidfile);
	free(opt_load_session_path);
	free(kmod_probes_list);
	free(kmod_extra_probes_list);

	run_as_destroy_worker();
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
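
/*
 * Layout of the reply message assembled by setup_lttng_msg() below; the
 * offsets follow directly from header_len and cmd_header_len:
 *
 *   +--------------------+---------------------------+--------------------+
 *   | lttcomm_lttng_msg  | command header (optional) | payload (optional) |
 *   +--------------------+---------------------------+--------------------+
 *   0                    header_len                  header_len + cmd_header_len
 */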
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return 0 on success, negative value on error.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx,
	const void *payload_buf, size_t payload_len,
	const void *cmd_header_buf, size_t cmd_header_len)
{
	int ret = 0;
	const size_t header_len = sizeof(struct lttcomm_lttng_msg);
	const size_t cmd_header_offset = header_len;
	const size_t payload_offset = cmd_header_offset + cmd_header_len;
	const size_t total_msg_size = header_len + cmd_header_len + payload_len;

	cmd_ctx->llm = zmalloc(total_msg_size);

	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto end;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
	cmd_ctx->llm->cmd_header_size = cmd_header_len;
	cmd_ctx->llm->data_size = payload_len;
	cmd_ctx->lttng_msg_size = total_msg_size;

	/* Copy command header */
	if (cmd_header_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + cmd_header_offset, cmd_header_buf,
			cmd_header_len);
	}

	/* Copy payload */
	if (payload_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + payload_offset, payload_buf,
			payload_len);
	}

end:
	return ret;
}

/*
 * Version of setup_lttng_msg() without command header.
 */
static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
		void *payload_buf, size_t payload_len)
{
	return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
}
/*
 * Update the kernel poll set of all channel fd available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing is started so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_stream(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		struct ust_app *app;

		session_lock(sess);
		if (!sess->ust_session) {
			goto unlock_session;
		}

		rcu_read_lock();
		assert(app_sock >= 0);
		app = ust_app_find_by_sock(app_sock);
		if (app == NULL) {
			/*
			 * Application can be unregistered before so
			 * this is possible hence simply stopping the
			 * update.
			 */
			DBG3("UST app update failed to find app sock %d",
				app_sock);
			goto unlock_rcu;
		}
		ust_app_global_update(sess->ust_session, app);

	unlock_rcu:
		rcu_read_unlock();
	unlock_session:
		session_unlock(sess);
	}
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (revents & LPOLLIN) {
				if (pollfd == kernel_poll_pipe[0]) {
					(void) lttng_read(kernel_poll_pipe[0],
						&tmp, 1);
					/*
					 * Ret value is useless here, if this pipe gets any actions an
					 * update is required anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * New CPU detected by the kernel. Adding kernel stream to
					 * kernel session and updating the kernel consumer
					 */
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal the pthread condition of the consumer data to notify the waiter of a
 * state change.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;
	struct consumer_socket *cmd_socket_wrapper = NULL;

	DBG("[thread] Manage consumer started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & LPOLLIN) {
				continue;
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();
	if (code != LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Connect both command and metadata sockets. */
	consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(
				consumer_data->cmd_unix_sock_path);
	consumer_data->metadata_fd =
			lttcomm_connect_unix_sock(
				consumer_data->cmd_unix_sock_path);
	if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
		PERROR("consumer connect cmd socket");
		/* On error, signal condition and quit. */
		signal_consumer_condition(consumer_data, -1);
		goto error;
	}

	consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;

	/* Create metadata socket lock. */
	consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
	if (consumer_data->metadata_sock.lock == NULL) {
		PERROR("zmalloc pthread mutex");
		goto error;
	}
	pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

	DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
	DBG("Consumer metadata socket ready (fd: %d)",
			consumer_data->metadata_fd);

	/*
	 * Remove the consumerd error sock since we've established a connection.
	 */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * Transfer the write-end of the channel monitoring pipe to the consumer
	 * by issuing a SET_CHANNEL_MONITOR_PIPE command.
	 */
	cmd_socket_wrapper = consumer_allocate_socket(&consumer_data->cmd_sock);
	if (!cmd_socket_wrapper) {
		goto error;
	}
	cmd_socket_wrapper->lock = &consumer_data->lock;

	ret = consumer_send_channel_monitor_pipe(cmd_socket_wrapper,
			consumer_data->channel_monitor_pipe);
	if (ret) {
		goto error;
	}

	/* Discard the socket wrapper as it is no longer needed. */
	consumer_destroy_socket(cmd_socket_wrapper);
	cmd_socket_wrapper = NULL;

	/* The thread is completely initialized, signal that it is ready. */
	signal_consumer_condition(consumer_data, 1);

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	while (1) {
		health_code_update();

		/* Exit the thread because the thread quit pipe has been triggered. */
		if (should_quit) {
			/* Not a health error. */
			err = 0;
			goto exit;
		}

		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/*
			 * Thread quit pipe has been triggered, flag that we should stop
			 * but continue the current loop to handle potential data from
			 * the consumer.
			 */
			should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("consumer err metadata socket second poll error");
					goto error;
				}
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			}
			/* No need for an else branch all FDs are tested prior. */
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them so get exclusive access which will abort all
	 * other consumer command by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr &&
	    *consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}
	lttng_poll_clean(&events);

	if (cmd_socket_wrapper) {
		consumer_destroy_socket(cmd_socket_wrapper);
	}

error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	rcu_thread_offline();
	rcu_unregister_thread();

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Apps thread return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & LPOLLIN) {
					int sock;

					/* Empty pipe */
					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * Since this is a command socket (write then read),
					 * we only monitor the error events of the socket.
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else {
					ERR("Unknown poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0 else a negative value being the errno message of the
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function being a shutdown state of the thread.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return ret;
}
/*
 * Sanitize the wait queue of the dispatch registration thread, meaning remove
 * invalid nodes from it. This is to avoid memory leaks for the case the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				/*
				 * Silence warning of use-after-free in
				 * cds_list_for_each_entry_safe which uses
				 * __typeof__(*wait_node).
				 */
				wait_node = NULL;
				break;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfcq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before receiving
			 * the notify socket. This is to clean the list in order to avoid
			 * memory leaks from notify socket that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage the received socket is
				 * basically useless so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * place.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	/* Empty command queue. */
	for (;;) {
		/* Dequeue command for registration */
		node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
		if (node == NULL) {
			break;
		}
		ust_cmd = caa_container_of(node, struct ust_command, node);
		ret = close(ust_cmd->sock);
		if (ret < 0) {
			PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ust_cmd);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);

	if (testpoint(sessiond_thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be problems for already\n"
			"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set socket timeout for both receiving and sending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * parameter.
					 */
					if (app_socket_timeout >= 0) {
						(void) lttcomm_setsockopt_rcv_timeout(sock,
								app_socket_timeout * 1000);
						(void) lttcomm_setsockopt_snd_timeout(sock,
								app_socket_timeout * 1000);
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						free(ust_cmd);
						/* Close socket of the application. */
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This apps will be part of the *system*.
					 */
					cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfcq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}
	}

exit:
error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/*
	 * Make sure we set the readiness flag to 0 because we are NOT ready.
	 * This access to consumer_thread_is_ready does not need to be
	 * protected by consumer_data.cond_mutex (yet) since the consumer
	 * management thread has not been started at this point.
	 */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret) {
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret) {
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret) {
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, default_pthread_attr(),
			thread_manage_consumer, consumer_data);
	if (ret) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = lttng_clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	/* Handle possible error */
	if (ret != 0) {
		if (ret == ETIMEDOUT) {
			int pth_ret;

			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("Condition timed out. The consumer thread was never ready."
					" Killing it");
			pth_ret = pthread_cancel(consumer_data->thread);
			if (pth_ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("pthread_cond_wait failed consumer thread");
		}
		/* Caller is expecting a negative value on failure. */
		ret = -1;
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
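/*
 * Aside: the readiness handshake above is the stock "spawn then wait"
 * pattern. A minimal sketch, assuming a generic flag/cond/mutex triple
 * (names are illustrative, not identifiers from this file):
 *
 *	pthread_mutex_lock(&mutex);
 *	while (!ready && ret != ETIMEDOUT)
 *		ret = pthread_cond_timedwait(&cond, &mutex, &deadline);
 *	pthread_mutex_unlock(&mutex);
 *
 * Pinning both lttng_clock_gettime() and the timedwait to CLOCK_MONOTONIC
 * keeps the deadline immune to wall-clock adjustments.
 */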
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		int ret;

		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			PERROR("Error killing consumer daemon");
			return ret;
		}
		return pthread_join(consumer_data->thread, &status);
	} else {
		return 0;
	}
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/* Exec consumerd. */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else if (lttng_opt_quiet) {
			verbosity = "--quiet";
		} else {
			verbosity = "";
		}

		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3(" 1) %s", consumerd64_bin);
			DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3(" 3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				ret = -EINVAL;
				goto error;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			ret = execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (ret) {
			PERROR("Consumer execl()");
		}
		/* Reaching this point, we got a failure on our execl(). */
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created sockets on error. */
	if (consumer_data->err_sock >= 0) {
		int err;

		err = close(consumer_data->err_sock);
		if (err < 0) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
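/*
 * Sequence recap (any step failing unwinds the previous ones): load the
 * control modules, open the lttng control file, validate that the kernel
 * tracer ABI matches, then load the data modules. Only after all four steps
 * succeed is kernel_tracer_fd considered usable by the rest of the daemon.
 */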
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session);
	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_output_put(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_output_put(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
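/*
 * Ownership note: consumer_copy_output() returns a new reference and the
 * previous per-domain output, if any, is dropped with consumer_output_put()
 * first, so each domain session holds exactly one reference to its consumer
 * output at all times.
 */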
/*
 * Create an UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session);
	assert(domain);
	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->id);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	lus->output_traces = session->output_traces;
	lus->snapshot_mode = session->snapshot_mode;
	lus->live_timer_interval = session->live_timer;
	session->ust_session = lus;
	if (session->shm_path[0]) {
		strncpy(lus->root_shm_path, session->shm_path,
			sizeof(lus->root_shm_path));
		lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
		strncpy(lus->shm_path, session->shm_path,
			sizeof(lus->shm_path));
		lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
		strncat(lus->shm_path, "/ust",
			sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
	}
	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (errno != EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;
	session->kernel_session->output_traces = session->output_traces;
	session->kernel_session->snapshot_mode = session->snapshot_mode;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
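/*
 * Note: an EEXIST from run_as_mkdir_recursive() is deliberately tolerated
 * above; the trace directory may already exist, e.g. when a previous
 * session used the same local output path.
 */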
/*
 * Count the number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
		int *sock_error)
{
	int ret = LTTNG_OK;
	int need_tracing_session = 1;
	int need_domain;

	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

	assert(!rcu_read_ongoing());

	*sock_error = 0;

	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_DESTROY_SESSION:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_START_TRACE:
	case LTTNG_STOP_TRACE:
	case LTTNG_DATA_PENDING:
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	case LTTNG_SNAPSHOT_RECORD:
	case LTTNG_SAVE_SESSION:
	case LTTNG_SET_SESSION_SHM_PATH:
	case LTTNG_REGENERATE_METADATA:
	case LTTNG_REGENERATE_STATEDUMP:
	case LTTNG_REGISTER_TRIGGER:
	case LTTNG_UNREGISTER_TRIGGER:
		need_domain = 0;
		break;
	default:
		need_domain = 1;
	}

	if (opt_no_kernel && need_domain
			&& cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
		} else {
			ret = LTTNG_ERR_KERN_NA;
		}
		goto error;
	}

	/* Deny register consumer if we already have a spawned consumer. */
	if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
		pthread_mutex_lock(&kconsumer_data.pid_mutex);
		if (kconsumer_data.pid > 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			goto error;
		}
		pthread_mutex_unlock(&kconsumer_data.pid_mutex);
	}
	/*
	 * Check for commands that don't need to allocate a returned payload. We do
	 * this here so we don't have to make the call for no payload at each
	 * command.
	 */
	switch(cmd_ctx->lsm->cmd_type) {
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_LIST_CHANNELS:
	case LTTNG_LIST_EVENTS:
	case LTTNG_LIST_SYSCALLS:
	case LTTNG_LIST_TRACKER_PIDS:
	case LTTNG_DATA_PENDING:
		break;
	default:
		/* Setup lttng message with no payload */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0);
		if (ret < 0) {
			/* This label does not try to unlock the session */
			goto init_setup_error;
		}
	}
	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_SYSCALLS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_SAVE_SESSION:
	case LTTNG_REGISTER_TRIGGER:
	case LTTNG_UNREGISTER_TRIGGER:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		/*
		 * We keep the session list lock across _all_ commands
		 * for now, because the per-session lock does not
		 * handle teardown properly.
		 */
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		if (cmd_ctx->session == NULL) {
			ret = LTTNG_ERR_SESS_NOT_FOUND;
			goto error;
		} else {
			/* Acquire lock for the session */
			session_lock(cmd_ctx->session);
		}
		break;
	}
	/*
	 * Commands that need a valid session but should NOT create one if none
	 * exists. Instead of creating one and destroying it when the command is
	 * handled, process that right before so we save some round trip in useless
	 * code path.
	 */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_DISABLE_CHANNEL:
	case LTTNG_DISABLE_EVENT:
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			if (!cmd_ctx->session->kernel_session) {
				ret = LTTNG_ERR_NO_CHANNEL;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_LOG4J:
		case LTTNG_DOMAIN_PYTHON:
		case LTTNG_DOMAIN_UST:
			if (!cmd_ctx->session->ust_session) {
				ret = LTTNG_ERR_NO_CHANNEL;
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNKNOWN_DOMAIN;
			goto error;
		}
		break;
	default:
		break;
	}
	if (!need_domain) {
		goto skip_domain;
	}

	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == -1) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_KERNCONSUMERD;
			goto error;
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
					goto error;
				}
				uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}

			/*
			 * The consumer was just spawned so we need to add the socket to
			 * the consumer output of the session if it exists.
			 */
			ret = consumer_create_socket(&kconsumer_data,
					cmd_ctx->session->kernel_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
	{
		if (!ust_app_supported()) {
			ret = LTTNG_ERR_NO_UST;
			goto error;
		}
		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_USTCONSUMERD;
			goto error;
		}

		if (need_tracing_session) {
			/* Create UST session if none exist. */
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTNG_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (consumerd64_bin[0] != '\0' &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
					uatomic_set(&ust_consumerd64_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 64 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer64_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (consumerd32_bin[0] != '\0' &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
					uatomic_set(&ust_consumerd32_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 32 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer32_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		break;
	}
skip_domain:

	/* Validate consumer daemon state when start/stop trace command */
	if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
			cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_NONE:
			break;
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_LOG4J:
		case LTTNG_DOMAIN_PYTHON:
		case LTTNG_DOMAIN_UST:
			if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_USTCONSUMERD;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_KERNEL:
			if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_KERNCONSUMERD;
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNKNOWN_DOMAIN;
			goto error;
		}
	}

	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
			ret = LTTNG_ERR_EPERM;
			goto error;
		}
	}

	/*
	 * Send relayd information to consumer as soon as we have a domain and a
	 * session defined.
	 */
	if (cmd_ctx->session && need_domain) {
		/*
		 * Setup relayd if not done yet. If the relayd information was already
		 * sent to the consumer, this call will gracefully return.
		 */
		ret = cmd_setup_relayd(cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}
	/* Process by command type */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_ADD_CONTEXT:
	{
		/*
		 * An LTTNG_ADD_CONTEXT command might have a supplementary
		 * payload if the context being added is an application context.
		 */
		if (cmd_ctx->lsm->u.context.ctx.ctx ==
				LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *context_name = NULL;
			size_t provider_name_len =
					cmd_ctx->lsm->u.context.provider_name_len;
			size_t context_name_len =
					cmd_ctx->lsm->u.context.context_name_len;

			if (provider_name_len == 0 || context_name_len == 0) {
				/*
				 * Application provider and context names MUST
				 * be provided.
				 */
				ret = -LTTNG_ERR_INVALID;
				goto error;
			}

			provider_name = zmalloc(provider_name_len + 1);
			if (!provider_name) {
				ret = -LTTNG_ERR_NOMEM;
				goto error;
			}
			cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name =
					provider_name;

			context_name = zmalloc(context_name_len + 1);
			if (!context_name) {
				ret = -LTTNG_ERR_NOMEM;
				goto error_add_context;
			}
			cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name =
					context_name;

			ret = lttcomm_recv_unix_sock(sock, provider_name,
					provider_name_len);
			if (ret < 0) {
				goto error_add_context;
			}

			ret = lttcomm_recv_unix_sock(sock, context_name,
					context_name_len);
			if (ret < 0) {
				goto error_add_context;
			}
		}

		/*
		 * cmd_add_context assumes ownership of the provider and context
		 * names.
		 */
		ret = cmd_add_context(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.context.channel_name,
				&cmd_ctx->lsm->u.context.ctx,
				kernel_poll_pipe[1]);

		cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name = NULL;
		cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name = NULL;
error_add_context:
		free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name);
		free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name);
		if (ret < 0) {
			goto error;
		}
		break;
	}
	case LTTNG_DISABLE_CHANNEL:
	{
		ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_EVENT:
	{

		/*
		 * FIXME: handle filter; for now we just receive the filter's
		 * bytecode along with the filter expression which are sent by
		 * liblttng-ctl and discard them.
		 *
		 * This fixes an issue where the client may block while sending
		 * the filter payload and encounter an error because the session
		 * daemon closes the socket without ever handling this data.
		 */
		size_t count = cmd_ctx->lsm->u.disable.expression_len +
			cmd_ctx->lsm->u.disable.bytecode_len;

		if (count) {
			char data[LTTNG_FILTER_MAX_LEN];

			DBG("Discarding disable event command payload of size %zu", count);
			while (count) {
				ret = lttcomm_recv_unix_sock(sock, data,
						count > sizeof(data) ? sizeof(data) : count);
				if (ret < 0) {
					goto error;
				}

				count -= (size_t) ret;
			}
		}
		/* FIXME: passing packed structure to non-packed pointer */
		ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name,
				&cmd_ctx->lsm->u.disable.event);
		break;
	}
	case LTTNG_ENABLE_CHANNEL:
	{
		cmd_ctx->lsm->u.channel.chan.attr.extended.ptr =
				(struct lttng_channel_extended *) &cmd_ctx->lsm->u.channel.extended;
		ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
				&cmd_ctx->lsm->u.channel.chan,
				kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_TRACK_PID:
	{
		ret = cmd_track_pid(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.pid_tracker.pid);
		break;
	}
	case LTTNG_UNTRACK_PID:
	{
		ret = cmd_untrack_pid(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.pid_tracker.pid);
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		struct lttng_event_exclusion *exclusion = NULL;
		struct lttng_filter_bytecode *bytecode = NULL;
		char *filter_expression = NULL;

		/* Handle exclusion events and receive it from the client. */
		if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
			size_t count = cmd_ctx->lsm->u.enable.exclusion_count;

			exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
					(count * LTTNG_SYMBOL_NAME_LEN));
			if (!exclusion) {
				ret = LTTNG_ERR_EXCLUSION_NOMEM;
				goto error;
			}

			DBG("Receiving var len exclusion event list from client ...");
			exclusion->count = count;
			ret = lttcomm_recv_unix_sock(sock, exclusion->names,
					count * LTTNG_SYMBOL_NAME_LEN);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(exclusion);
				ret = LTTNG_ERR_EXCLUSION_INVAL;
				goto error;
			}
		}

		/* Get filter expression from client. */
		if (cmd_ctx->lsm->u.enable.expression_len > 0) {
			size_t expression_len =
					cmd_ctx->lsm->u.enable.expression_len;

			if (expression_len > LTTNG_FILTER_MAX_LEN) {
				ret = LTTNG_ERR_FILTER_INVAL;
				free(exclusion);
				goto error;
			}

			filter_expression = zmalloc(expression_len);
			if (!filter_expression) {
				free(exclusion);
				ret = LTTNG_ERR_FILTER_NOMEM;
				goto error;
			}

			/* Receive var. len. data */
			DBG("Receiving var len filter's expression from client ...");
			ret = lttcomm_recv_unix_sock(sock, filter_expression,
					expression_len);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(filter_expression);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}
		}

		/* Handle filter and get bytecode from client. */
		if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
			size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;

			if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
				ret = LTTNG_ERR_FILTER_INVAL;
				free(filter_expression);
				free(exclusion);
				goto error;
			}

			bytecode = zmalloc(bytecode_len);
			if (!bytecode) {
				free(filter_expression);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_NOMEM;
				goto error;
			}

			/* Receive var. len. data */
			DBG("Receiving var len filter's bytecode from client ...");
			ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}

			if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}
		}

		ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event,
				filter_expression, bytecode, exclusion,
				kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		session_lock_list();
		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		session_unlock_list();
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
			sizeof(struct lttng_event) * nb_events);
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		session_lock_list();
		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		session_unlock_list();
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, fields,
			sizeof(struct lttng_event_field) * nb_fields);
		free(fields);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SYSCALLS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_syscalls(&events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
			sizeof(struct lttng_event) * nb_events);
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACKER_PIDS:
	{
		int32_t *pids = NULL;
		ssize_t nr_pids;

		nr_pids = cmd_list_tracker_pids(cmd_ctx->session,
				cmd_ctx->lsm->domain.type, &pids);
		if (nr_pids < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nr_pids;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, pids,
			sizeof(int32_t) * nr_pids);
		free(pids);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_SESSION_FAIL;
			free(uris);
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
		free(uris);
		if (ret != LTTNG_OK) {
			goto error;
		}

		break;
	}
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds, 0);
		free(uris);
		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains = NULL;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, domains,
			nb_dom * sizeof(struct lttng_domain));
		free(domains);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		ssize_t payload_size;
		struct lttng_channel *channels = NULL;

		payload_size = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (payload_size < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -payload_size;
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, channels,
			payload_size);
		free(channels);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;
		struct lttcomm_event_command_header cmd_header;
		size_t total_size;

		memset(&cmd_header, 0, sizeof(cmd_header));
		/* Extended infos are included at the end of events */
		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type,
			cmd_ctx->session, cmd_ctx->lsm->u.list.channel_name,
			&events, &total_size);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		cmd_header.nb_events = nb_event;
		ret = setup_lttng_msg(cmd_ctx, events, total_size,
			&cmd_header, sizeof(cmd_header));
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;
		void *sessions_payload;
		size_t payload_len;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
		payload_len = sizeof(struct lttng_session) * nr_sessions;
		sessions_payload = zmalloc(payload_len);
		if (!sessions_payload) {
			session_unlock_list();
			ret = -ENOMEM;
			goto setup_error;
		}

		cmd_list_lttng_sessions(sessions_payload,
			LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
			LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
		session_unlock_list();

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, sessions_payload,
			payload_len);
		free(sessions_payload);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		int pending_ret;
		uint8_t pending_ret_byte;

		pending_ret = cmd_data_pending(cmd_ctx->session);

		/*
		 * This function may return 0 or 1 to indicate whether or not
		 * there is data pending. In case of error, it should return an
		 * LTTNG_ERR code. However, some code paths may still return
		 * a nondescript error code, which we handle by returning an
		 * "unknown" error.
		 */
		if (pending_ret == 0 || pending_ret == 1) {
			/*
			 * ret will be set to LTTNG_OK at the end of
			 * this function.
			 */
		} else if (pending_ret < 0) {
			ret = LTTNG_ERR_UNK;
			goto setup_error;
		} else {
			ret = pending_ret;
			goto setup_error;
		}

		pending_ret_byte = (uint8_t) pending_ret;

		/* 1 byte to return whether or not data is pending */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx,
			&pending_ret_byte, 1);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	{
		struct lttcomm_lttng_output_id reply;

		ret = cmd_snapshot_add_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
		if (ret != LTTNG_OK) {
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &reply,
			sizeof(reply));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy output list into message payload */
		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	{
		ret = cmd_snapshot_del_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output);
		break;
	}
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	{
		ssize_t nb_output;
		struct lttng_snapshot_output *outputs = NULL;

		nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
		if (nb_output < 0) {
			ret = -nb_output;
			goto error;
		}

		assert((nb_output > 0 && outputs) || nb_output == 0);
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, outputs,
			nb_output * sizeof(struct lttng_snapshot_output));
		free(outputs);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_RECORD:
	{
		ret = cmd_snapshot_record(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_record.output,
				cmd_ctx->lsm->u.snapshot_record.wait);
		break;
	}
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds);
		free(uris);
		break;
	}
	case LTTNG_CREATE_SESSION_LIVE:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
		free(uris);
		break;
	}
	case LTTNG_SAVE_SESSION:
	{
		ret = cmd_save_sessions(&cmd_ctx->lsm->u.save_session.attr,
			&cmd_ctx->creds);
		break;
	}
	case LTTNG_SET_SESSION_SHM_PATH:
	{
		ret = cmd_set_session_shm_path(cmd_ctx->session,
				cmd_ctx->lsm->u.set_shm_path.shm_path);
		break;
	}
	case LTTNG_REGENERATE_METADATA:
	{
		ret = cmd_regenerate_metadata(cmd_ctx->session);
		break;
	}
	case LTTNG_REGENERATE_STATEDUMP:
	{
		ret = cmd_regenerate_statedump(cmd_ctx->session);
		break;
	}
	case LTTNG_REGISTER_TRIGGER:
	{
		ret = cmd_register_trigger(cmd_ctx, sock,
				notification_thread_handle);
		break;
	}
	case LTTNG_UNREGISTER_TRIGGER:
	{
		ret = cmd_unregister_trigger(cmd_ctx, sock,
				notification_thread_handle);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}
error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;
setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}
init_setup_error:
	assert(!rcu_read_ongoing());
	return ret;
}
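/*
 * Contract recap: on every path that reaches the error label, cmd_ctx->llm
 * is allocated (if needed) and its ret_code is filled in, so the client
 * thread can always send a reply; only the setup_error/init_setup_error
 * paths return without a complete reply buffer.
 */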
/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		ret = chown(health_unix_sock_path, 0,
				utils_get_group_id(tracing_group_name));
		if (ret < 0) {
			ERR("Unable to set group on %s", health_unix_sock_path);
			goto error;
		}

		ret = chmod(health_unix_sock_path,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", health_unix_sock_path);
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	sessiond_notify_ready();

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
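/*
 * Reply layout: each health subsystem owns one bit of reply.ret_code and a
 * set bit flags a failed liveness check. A client-side decode could look
 * like this (illustrative only, not an API defined in this file):
 *
 *	if (reply.ret_code & (1ULL << HEALTH_SESSIOND_TYPE_CMD)) {
 *		// the client command thread is unhealthy
 *	}
 */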
/*
 * This thread manages all client requests using the unix client socket for
 * communication.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);

	health_code_update();

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	sessiond_notify_ready();
	ret = sem_post(&load_info->message_thread_ready);
	if (ret) {
		PERROR("sem_post message_thread_ready");
		goto error;
	}

	/* This testpoint is after we signal readiness to the parent. */
	if (testpoint(sessiond_thread_manage_clients)) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update();

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update();

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * needed information for the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		DBG("Sending response (size: %d, retcode: %s (%d))",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code),
				cmd_ctx->llm->ret_code);
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update();
	}

exit:
error:
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_listen:
error_create_poll:
	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}

	health_unregister(health_sessiond);

	DBG("Client thread dying");

	rcu_unregister_thread();

	/*
	 * Since we are creating the consumer threads, we own them, so we need
	 * to join them before our thread exits.
	 */
	ret = join_consumer_thread(&kconsumer_data);
	if (ret) {
		PERROR("join_consumer");
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret) {
		PERROR("join_consumer ust32");
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret) {
		PERROR("join_consumer ust64");
	}
	return NULL;
}
static int string_match(const char *str1, const char *str2)
{
	return (str1 && str2) && !strcmp(str1, str2);
}
/*
 * Take an option from the getopt output and set it in the right variable to be
 * used later.
 *
 * Return 0 on success else a negative value.
 */
static int set_option(int opt, const char *arg, const char *optname)
{
	int ret = 0;

	if (string_match(optname, "client-sock") || opt == 'c') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-c, --client-sock");
		} else {
			snprintf(client_unix_sock_path, PATH_MAX, "%s", arg);
		}
	} else if (string_match(optname, "apps-sock") || opt == 'a') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-a, --apps-sock");
		} else {
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", arg);
		}
	} else if (string_match(optname, "daemonize") || opt == 'd') {
		opt_daemon = 1;
	} else if (string_match(optname, "background") || opt == 'b') {
		opt_background = 1;
	} else if (string_match(optname, "group") || opt == 'g') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-g, --group");
		} else {
			/*
			 * If the override option is set, the pointer points to a
			 * *non* const thus freeing it even though the variable type is
			 * set to const.
			 */
			if (tracing_group_name_override) {
				free((void *) tracing_group_name);
			}
			tracing_group_name = strdup(arg);
			if (!tracing_group_name) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
			tracing_group_name_override = 1;
		}
	} else if (string_match(optname, "help") || opt == 'h') {
		ret = utils_show_help(8, "lttng-sessiond", help_msg);
		if (ret) {
			ERR("Cannot show --help for `lttng-sessiond`");
		}
		exit(ret ? EXIT_FAILURE : EXIT_SUCCESS);
	} else if (string_match(optname, "version") || opt == 'V') {
		fprintf(stdout, "%s\n", VERSION);
		exit(EXIT_SUCCESS);
	} else if (string_match(optname, "sig-parent") || opt == 'S') {
		opt_sig_parent = 1;
	} else if (string_match(optname, "kconsumerd-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--kconsumerd-err-sock");
		} else {
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", arg);
		}
	} else if (string_match(optname, "kconsumerd-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--kconsumerd-cmd-sock");
		} else {
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
		}
	} else if (string_match(optname, "ustconsumerd64-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--ustconsumerd64-err-sock");
		} else {
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", arg);
		}
	} else if (string_match(optname, "ustconsumerd64-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--ustconsumerd64-cmd-sock");
		} else {
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
		}
	} else if (string_match(optname, "ustconsumerd32-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--ustconsumerd32-err-sock");
		} else {
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", arg);
		}
	} else if (string_match(optname, "ustconsumerd32-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--ustconsumerd32-cmd-sock");
		} else {
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
		}
	} else if (string_match(optname, "no-kernel")) {
		opt_no_kernel = 1;
	} else if (string_match(optname, "quiet") || opt == 'q') {
		lttng_opt_quiet = 1;
	} else if (string_match(optname, "verbose") || opt == 'v') {
		/* Verbose level can increase using multiple -v */
		if (arg) {
			/* Value obtained from config file */
			lttng_opt_verbose = config_parse_value(arg);
		} else {
			/* -v used on command line */
			lttng_opt_verbose++;
		}
		/* Clamp value to [0, 3] */
		lttng_opt_verbose = lttng_opt_verbose < 0 ? 0 :
			(lttng_opt_verbose <= 3 ? lttng_opt_verbose : 3);
	} else if (string_match(optname, "verbose-consumer")) {
		if (arg) {
			opt_verbose_consumer = config_parse_value(arg);
		} else {
			opt_verbose_consumer++;
		}
	} else if (string_match(optname, "consumerd32-path")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--consumerd32-path");
		} else {
			if (consumerd32_bin_override) {
				free((void *) consumerd32_bin);
			}
			consumerd32_bin = strdup(arg);
			if (!consumerd32_bin) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
			consumerd32_bin_override = 1;
		}
	} else if (string_match(optname, "consumerd32-libdir")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--consumerd32-libdir");
		} else {
			if (consumerd32_libdir_override) {
				free((void *) consumerd32_libdir);
			}
			consumerd32_libdir = strdup(arg);
			if (!consumerd32_libdir) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
			consumerd32_libdir_override = 1;
		}
	} else if (string_match(optname, "consumerd64-path")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--consumerd64-path");
		} else {
			if (consumerd64_bin_override) {
				free((void *) consumerd64_bin);
			}
			consumerd64_bin = strdup(arg);
			if (!consumerd64_bin) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
			consumerd64_bin_override = 1;
		}
	} else if (string_match(optname, "consumerd64-libdir")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--consumerd64-libdir");
		} else {
			if (consumerd64_libdir_override) {
				free((void *) consumerd64_libdir);
			}
			consumerd64_libdir = strdup(arg);
			if (!consumerd64_libdir) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
			consumerd64_libdir_override = 1;
		}
	} else if (string_match(optname, "pidfile") || opt == 'p') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-p, --pidfile");
		} else {
			free(opt_pidfile);
			opt_pidfile = strdup(arg);
			if (!opt_pidfile) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "agent-tcp-port")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--agent-tcp-port");
		} else {
			unsigned long v;

			errno = 0;
			v = strtoul(arg, NULL, 0);
			if (errno != 0 || !isdigit(arg[0])) {
				ERR("Wrong value in --agent-tcp-port parameter: %s", arg);
				return -1;
			}
			if (v == 0 || v >= 65535) {
				ERR("Port overflow in --agent-tcp-port parameter: %s", arg);
				return -1;
			}
			agent_tcp_port = (uint32_t) v;
			DBG3("Agent TCP port set to non default: %u", agent_tcp_port);
		}
	} else if (string_match(optname, "load") || opt == 'l') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-l, --load");
		} else {
			free(opt_load_session_path);
			opt_load_session_path = strdup(arg);
			if (!opt_load_session_path) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "kmod-probes")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--kmod-probes");
		} else {
			free(kmod_probes_list);
			kmod_probes_list = strdup(arg);
			if (!kmod_probes_list) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "extra-kmod-probes")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"--extra-kmod-probes");
		} else {
			free(kmod_extra_probes_list);
			kmod_extra_probes_list = strdup(arg);
			if (!kmod_extra_probes_list) {
				PERROR("strdup");
				ret = -ENOMEM;
			}
		}
	} else if (string_match(optname, "config") || opt == 'f') {
		/* This is handled in set_options() thus silent skip. */
		goto end;
	} else {
		/* Unknown option or other error.
		 * Error is printed by getopt, just return */
		ret = -1;
	}

end:
	if (ret == -EINVAL) {
		const char *opt_name = "unknown";
		int i;

		for (i = 0; i < sizeof(long_options) / sizeof(struct option);
				i++) {
			if (opt == long_options[i].val) {
				opt_name = long_options[i].name;
				break;
			}
		}

		WARN("Invalid argument provided for option \"%s\", using default value.",
				opt_name);
		ret = 0;
	}

	return ret;
}
/*
 * config_entry_handler_cb used to handle options read from a config file.
 * See config_entry_handler_cb comment in common/config/session-config.h for the
 * return value conventions.
 */
static int config_entry_handler(const struct config_entry *entry, void *unused)
{
	int ret = 0, i;

	if (!entry || !entry->name || !entry->value) {
		ret = -EINVAL;
		goto end;
	}

	/* Check if the option is to be ignored */
	for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
		if (!strcmp(entry->name, config_ignore_options[i])) {
			goto end;
		}
	}

	for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1;
			i++) {

		/* Ignore if not fully matched. */
		if (strcmp(entry->name, long_options[i].name)) {
			continue;
		}

		/*
		 * If the option takes no argument on the command line, we have to
		 * check if the value is "true". We support non-zero numeric values,
		 * true, on and yes.
		 */
		if (!long_options[i].has_arg) {
			ret = config_parse_value(entry->value);
			if (ret <= 0) {
				if (ret) {
					WARN("Invalid configuration value \"%s\" for option %s",
							entry->value, entry->name);
				}
				/* False, skip boolean config option. */
				goto end;
			}
		}

		ret = set_option(long_options[i].val, entry->value, entry->name);
		goto end;
	}

	WARN("Unrecognized option \"%s\" in daemon configuration file.", entry->name);

end:
	return ret;
}
/*
 * daemon configuration loading and argument parsing
 */
static int set_options(int argc, char **argv)
{
	int ret = 0, c = 0, option_index = 0;
	int orig_optopt = optopt, orig_optind = optind;
	char *optstring;
	const char *config_path = NULL;

	optstring = utils_generate_optstring(long_options,
			sizeof(long_options) / sizeof(struct option));
	if (!optstring) {
		ret = -ENOMEM;
		goto end;
	}

	/* Check for the --config option */
	while ((c = getopt_long(argc, argv, optstring, long_options,
					&option_index)) != -1) {
		if (c == '?') {
			ret = -EINVAL;
			goto end;
		} else if (c != 'f') {
			/* if not equal to --config option. */
			continue;
		}

		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-f, --config");
		} else {
			config_path = utils_expand_path(optarg);
			if (!config_path) {
				ERR("Failed to resolve path: %s", optarg);
			}
		}
	}

	ret = config_get_section_entries(config_path, config_section_name,
			config_entry_handler, NULL);
	if (ret) {
		if (ret > 0) {
			ERR("Invalid configuration option at line %i", ret);
			ret = -1;
		}
		goto end;
	}

	/* Reset getopt's global state */
	optopt = orig_optopt;
	optind = orig_optind;
	while (1) {
		option_index = -1;
		/*
		 * getopt_long() will not set option_index if it encounters a
		 * short option.
		 */
		c = getopt_long(argc, argv, optstring, long_options,
				&option_index);
		if (c == -1) {
			break;
		}

		/*
		 * Pass NULL as the long option name if getopt_long() left the
		 * index unset.
		 */
		ret = set_option(c, optarg,
				option_index < 0 ? NULL :
					long_options[option_index].name);
		if (ret) {
			break;
		}
	}

end:
	free(optstring);
	return ret;
}
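
/*
 * Note on the double scan above: getopt_long() keeps its parsing state in
 * the optind/optopt globals, so argv can only be walked twice (once to
 * find --config, once for the real parse) because those globals are saved
 * and restored in between. A minimal sketch of the same idiom:
 *
 *   int saved_optind = optind, saved_optopt = optopt;
 *   while (getopt_long(argc, argv, optstring, long_options, &idx) != -1) {
 *           // first pass: look for a single option of interest
 *   }
 *   optind = saved_optind;
 *   optopt = saved_optopt;
 *   // second pass: full parse
 *
 * This ordering lets configuration-file values be applied first and then
 * overridden by explicit command-line arguments.
 */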
/*
 * Creates the two sockets needed by the daemon.
 * 	apps_sock - The communication socket for all UST apps.
 * 	client_sock - The communication socket of the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	(void) umask(old_umask);
	return ret;
}
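
/*
 * utils_set_fd_cloexec() is presumably a thin wrapper over fcntl(); the
 * portable way to mark an already-created descriptor close-on-exec looks
 * like this (sketch only, error handling elided):
 *
 *   int flags = fcntl(fd, F_GETFD);
 *   if (flags >= 0) {
 *           (void) fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
 *   }
 *
 * There is an inherent race: a fork()+exec() in another thread between
 * socket creation and this call can still leak the fd, which is why the
 * failure above is logged but not treated as fatal.
 */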
/*
 * Check if the global socket is available, and if a daemon is answering at
 * the other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there ? */
	return lttng_session_daemon_alive() ? -EEXIST : 0;
}
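
/*
 * lttng_session_daemon_alive() presumably goes beyond a stat() of the
 * socket file and actually connects to it: a stale socket inode can
 * outlive a crashed daemon, so only a successful connection proves that
 * another sessiond is really answering.
 */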
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	gid = utils_get_group_id(tracing_group_name);

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/*
	 * Ensure all applications and tracing group can search the run
	 * dir. Allow everyone to read the directory, since it does not
	 * buy us anything to hide its content.
	 */
	ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		}
		ret = 0;
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the kconsumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
	if (ret < 0) {
		if (errno != EEXIST) {
			PERROR("mkdir");
			ERR("Failed to create %s", path);
			goto error;
		}
		ret = 0;
	}

	if (is_root) {
		ret = chown(path, 0, utils_get_group_id(tracing_group_name));
		if (ret < 0) {
			ERR("Unable to set group on %s", path);
			PERROR("chown");
			goto error;
		}
	}

	/* Create the kconsumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	ret = utils_set_fd_cloexec(consumer_data->err_sock);
	if (ret < 0) {
		PERROR("utils_set_fd_cloexec");
		/* continue anyway */
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	case SIGUSR1:
		CMM_STORE_SHARED(recv_child_signal, 1);
		break;
	default:
		break;
	}
}

/*
 * Setup signal handler for :
 *		SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_mask = sigset;
	sa.sa_flags = 0;

	sa.sa_handler = sighandler;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	sa.sa_handler = SIG_IGN;
	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
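
/*
 * Raising RLIMIT_NOFILE above the hard limit requires privileges
 * (CAP_SYS_RESOURCE), which is why set_ulimit() is only called from the
 * is_root path in main(). The effective limit can be checked from a
 * shell with, e.g.:
 *
 *   $ prlimit --nofile --pid $(pgrep lttng-sessiond)
 */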
/*
 * Write pidfile using the rundir and opt_pidfile.
 */
static int write_pidfile(void)
{
	int ret;
	char pidfile_path[PATH_MAX];

	assert(rundir);

	if (opt_pidfile) {
		if (lttng_strncpy(pidfile_path, opt_pidfile,
				sizeof(pidfile_path))) {
			ret = -1;
			goto error;
		}
	} else {
		/* Build pidfile path from rundir and opt_pidfile. */
		ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
				DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
		if (ret < 0) {
			PERROR("snprintf pidfile path");
			goto error;
		}
	}

	/*
	 * Create pid file in rundir.
	 */
	ret = utils_create_pid_file(getpid(), pidfile_path);
error:
	return ret;
}
/*
 * Create lockfile using the rundir and return its fd.
 */
static int create_lockfile(void)
{
	int ret;
	char lockfile_path[PATH_MAX];

	ret = generate_lock_file_path(lockfile_path, sizeof(lockfile_path));
	if (ret < 0) {
		goto error;
	}

	ret = utils_create_lock_file(lockfile_path);
error:
	return ret;
}
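
/*
 * utils_create_lock_file() is expected to take an exclusive, non-blocking
 * lock and keep the fd open for the daemon's lifetime. The classic shape
 * of such a helper is (sketch only, under that assumption):
 *
 *   int fd = open(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR);
 *   struct flock lock = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *   if (fd >= 0 && fcntl(fd, F_SETLK, &lock) < 0) {
 *           // another sessiond instance already holds the lock
 *   }
 *
 * Unlike a pidfile, the lock is released automatically by the kernel if
 * the process dies.
 */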
/*
 * Write agent TCP port using the rundir.
 */
static int write_agent_port(void)
{
	int ret;
	char path[PATH_MAX];

	assert(rundir);

	ret = snprintf(path, sizeof(path), "%s/"
			DEFAULT_LTTNG_SESSIOND_AGENTPORT_FILE, rundir);
	if (ret < 0) {
		PERROR("snprintf agent port path");
		goto error;
	}

	/*
	 * Create TCP agent port file in rundir.
	 */
	ret = utils_create_pid_file(agent_tcp_port, path);

error:
	return ret;
}
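
/*
 * Agents (e.g. the lttng-ust Java and Python agents) read this file to
 * learn which TCP port the agent registration thread listens on;
 * something like "cat $LTTNG_HOME/.lttng/agent.port" would show the
 * value. The exact file name comes from
 * DEFAULT_LTTNG_SESSIOND_AGENTPORT_FILE.
 */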
int main(int argc, char **argv)
{
	int ret = 0, retval = 0;
	void *status;
	const char *home_path, *env_app_timeout;
	struct lttng_pipe *ust32_channel_monitor_pipe = NULL,
			*ust64_channel_monitor_pipe = NULL,
			*kernel_channel_monitor_pipe = NULL;
	bool notification_thread_running = false;

	init_kernel_workarounds();

	rcu_register_thread();

	if (set_signal_handler()) {
		retval = -1;
		goto exit_set_signal_handler;
	}

	setup_consumerd_path();

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0) {
		PERROR("sysconf _SC_PAGESIZE");
		page_size = LONG_MAX;
		WARN("Fallback page size to %ld", page_size);
	}
	/*
	 * Parse arguments and load the daemon configuration file.
	 *
	 * We have an exit_options exit path to free memory reserved by
	 * set_options. This is needed because the rest of sessiond_cleanup()
	 * depends on ht_cleanup_thread, which depends on lttng_daemonize,
	 * which depends on set_options.
	 */
	if (set_options(argc, argv)) {
		retval = -1;
		goto exit_options;
	}

	/* Daemonize */
	if (opt_daemon || opt_background) {
		int i;

		ret = lttng_daemonize(&child_ppid, &recv_child_signal,
				!opt_background);
		if (ret < 0) {
			retval = -1;
			goto exit_options;
		}

		/*
		 * We are in the child. Make sure all other file descriptors
		 * are closed, in case we are called with more opened file
		 * descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	if (run_as_create_worker(argv[0]) < 0) {
		goto exit_create_run_as_worker_cleanup;
	}
	/*
	 * Starting from here, we can create threads. This needs to be after
	 * lttng_daemonize due to RCU.
	 */

	/*
	 * Initialize the health check subsystem. This call should set the
	 * appropriate time values.
	 */
	health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
	if (!health_sessiond) {
		PERROR("health_app_create error");
		retval = -1;
		goto exit_health_sessiond_cleanup;
	}

	/* Create thread to clean up RCU hash tables */
	if (init_ht_cleanup_thread(&ht_cleanup_thread)) {
		retval = -1;
		goto exit_ht_cleanup;
	}

	/* Create thread quit pipe */
	if (init_thread_quit_pipe()) {
		retval = -1;
		goto exit_init_data;
	}
	/* Check if daemon is UID = 0 */
	is_root = !getuid();
	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);
		if (!rundir) {
			retval = -1;
			goto exit_init_data;
		}

		/* Create global run dir with root access */
		if (create_lttng_rundir(rundir)) {
			retval = -1;
			goto exit_init_data;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			ret = snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		if (strlen(client_unix_sock_path) == 0) {
			ret = snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			ret = snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		if (strlen(health_unix_sock_path) == 0) {
			ret = snprintf(health_unix_sock_path,
					sizeof(health_unix_sock_path),
					DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		/* Setup kernel consumerd path */
		ret = snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		if (ret < 0) {
			retval = -1;
			goto exit_init_data;
		}
		ret = snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
		if (ret < 0) {
			retval = -1;
			goto exit_init_data;
		}

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
		kernel_channel_monitor_pipe = lttng_pipe_open(0);
		if (!kernel_channel_monitor_pipe) {
			ERR("Failed to create kernel consumer channel monitor pipe");
			retval = -1;
			goto exit_init_data;
		}
		kconsumer_data.channel_monitor_pipe =
				lttng_pipe_release_writefd(
					kernel_channel_monitor_pipe);
		if (kconsumer_data.channel_monitor_pipe < 0) {
			retval = -1;
			goto exit_init_data;
		}
	} else {
		home_path = utils_get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			retval = -1;
			goto exit_init_data;
		}

		/*
		 * Create rundir from home path. This will create something
		 * like $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			retval = -1;
			goto exit_init_data;
		}

		if (create_lttng_rundir(rundir)) {
			retval = -1;
			goto exit_init_data;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			ret = snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK,
					home_path);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			ret = snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK,
					home_path);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			ret = snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH,
					getuid());
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			ret = snprintf(health_unix_sock_path,
					sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK,
					home_path);
			if (ret < 0) {
				retval = -1;
				goto exit_init_data;
			}
		}
	}
	lockfile_fd = create_lockfile();
	if (lockfile_fd < 0) {
		retval = -1;
		goto exit_init_data;
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("Application wait path %s", wait_shm_path);
	DBG("LTTng run directory path: %s", rundir);
	/* 32 bits consumerd path setup */
	ret = snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	if (ret < 0) {
		PERROR("snprintf 32-bit consumer error socket path");
		retval = -1;
		goto exit_init_data;
	}
	ret = snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
	if (ret < 0) {
		PERROR("snprintf 32-bit consumer command socket path");
		retval = -1;
		goto exit_init_data;
	}

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);
	ust32_channel_monitor_pipe = lttng_pipe_open(0);
	if (!ust32_channel_monitor_pipe) {
		ERR("Failed to create 32-bit user space consumer channel monitor pipe");
		retval = -1;
		goto exit_init_data;
	}
	ustconsumer32_data.channel_monitor_pipe = lttng_pipe_release_writefd(
			ust32_channel_monitor_pipe);
	if (ustconsumer32_data.channel_monitor_pipe < 0) {
		retval = -1;
		goto exit_init_data;
	}

	/* 64 bits consumerd path setup */
	ret = snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	if (ret < 0) {
		PERROR("snprintf 64-bit consumer error socket path");
		retval = -1;
		goto exit_init_data;
	}
	ret = snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
	if (ret < 0) {
		PERROR("snprintf 64-bit consumer command socket path");
		retval = -1;
		goto exit_init_data;
	}

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);
	ust64_channel_monitor_pipe = lttng_pipe_open(0);
	if (!ust64_channel_monitor_pipe) {
		ERR("Failed to create 64-bit user space consumer channel monitor pipe");
		retval = -1;
		goto exit_init_data;
	}
	ustconsumer64_data.channel_monitor_pipe = lttng_pipe_release_writefd(
			ust64_channel_monitor_pipe);
	if (ustconsumer64_data.channel_monitor_pipe < 0) {
		retval = -1;
		goto exit_init_data;
	}
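
	/*
	 * Ownership note: lttng_pipe_release_writefd() detaches the write end
	 * from the lttng_pipe wrapper, so each consumer_data structure now
	 * owns that raw fd (it is eventually handed over to the matching
	 * consumer daemon). The read ends stay inside the lttng_pipe objects
	 * and are acquired by the notification thread handle created further
	 * down.
	 */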
	/*
	 * See if daemon already exist.
	 */
	if (check_existing_daemon()) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	if (ust_app_ht_alloc()) {
		ERR("Failed to allocate UST app hash table");
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * Initialize agent app hash table. We allocate the hash table here
	 * since cleanup() can get called after this point.
	 */
	if (agent_app_ht_alloc()) {
		ERR("Failed to allocate Agent app hash table");
		retval = -1;
		goto exit_init_data;
	}
	/*
	 * These actions must be executed as root. We do that *after* setting
	 * up the sockets path because we MUST make the check for another
	 * daemon using those paths *before* trying to set the kernel consumer
	 * sockets and init kernel tracer.
	 */
	if (is_root) {
		if (set_consumer_sockets(&kconsumer_data, rundir)) {
			retval = -1;
			goto exit_init_data;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
			if (kernel_tracer_fd >= 0) {
				ret = syscall_init_table();
				if (ret < 0) {
					ERR("Unable to populate syscall table. "
						"Syscall tracing won't work "
						"for this session daemon.");
				}
			}
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();
	if (set_consumer_sockets(&ustconsumer64_data, rundir)) {
		retval = -1;
		goto exit_init_data;
	}

	if (set_consumer_sockets(&ustconsumer32_data, rundir)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the needed unix socket */
	if (init_daemon_socket()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Set credentials to socket */
	if (is_root && set_permissions(rundir)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}
	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if (utils_create_pipe_cloexec(kernel_poll_pipe)) {
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Setup the thread apps communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_pipe)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the thread apps notify communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_notify_pipe)) {
		retval = -1;
		goto exit_init_data;
	}
	/* Initialize global buffer per UID and PID registry. */
	buffer_reg_init_uid_registry();
	buffer_reg_init_pid_registry();

	/* Init UST command queue. */
	cds_wfcq_init(&ust_cmd_queue.head, &ust_cmd_queue.tail);

	/*
	 * Get session list pointer. This pointer MUST NOT be free'd. This
	 * list is statically declared in session.c
	 */
	session_list_ptr = session_get_list();
	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}
	ret = write_pidfile();
	if (ret) {
		ERR("Error in write_pidfile");
		retval = -1;
		goto exit_init_data;
	}
	ret = write_agent_port();
	if (ret) {
		ERR("Error in write_agent_port");
		retval = -1;
		goto exit_init_data;
	}
	/* Initialize communication library */
	lttcomm_init();
	/* Initialize TCP timeout values */
	lttcomm_inet_init();

	if (load_session_init_data(&load_info) < 0) {
		retval = -1;
		goto exit_init_data;
	}
	load_info->path = opt_load_session_path;
	/* Create health-check thread. */
	ret = pthread_create(&health_thread, default_pthread_attr(),
			thread_manage_health, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create health");
		retval = -1;
		goto exit_health;
	}

	/* notification_thread_data acquires the pipes' read side. */
	notification_thread_handle = notification_thread_handle_create(
			ust32_channel_monitor_pipe,
			ust64_channel_monitor_pipe,
			kernel_channel_monitor_pipe);
	if (!notification_thread_handle) {
		retval = -1;
		ERR("Failed to create notification thread shared data");
		stop_threads();
		goto exit_notification;
	}

	/* Create notification thread. */
	ret = pthread_create(&notification_thread, default_pthread_attr(),
			thread_notification, notification_thread_handle);
	if (ret) {
		errno = ret;
		PERROR("pthread_create notification");
		retval = -1;
		stop_threads();
		goto exit_notification;
	}
	notification_thread_running = true;
	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, default_pthread_attr(),
			thread_manage_clients, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create clients");
		retval = -1;
		stop_threads();
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, default_pthread_attr(),
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create dispatch");
		retval = -1;
		stop_threads();
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, default_pthread_attr(),
			thread_registration_apps, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create registration");
		retval = -1;
		stop_threads();
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, default_pthread_attr(),
			thread_manage_apps, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create apps");
		retval = -1;
		stop_threads();
		goto exit_apps;
	}

	/* Create thread to manage application notify socket */
	ret = pthread_create(&apps_notify_thread, default_pthread_attr(),
			ust_thread_manage_notify, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create notify");
		retval = -1;
		stop_threads();
		goto exit_apps_notify;
	}

	/* Create agent registration thread. */
	ret = pthread_create(&agent_reg_thread, default_pthread_attr(),
			agent_thread_manage_registration, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create agent");
		retval = -1;
		stop_threads();
		goto exit_agent_reg;
	}

	/* Don't start this thread if kernel tracing is not requested nor root */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, default_pthread_attr(),
				thread_manage_kernel, (void *) NULL);
		if (ret) {
			errno = ret;
			PERROR("pthread_create kernel");
			retval = -1;
			stop_threads();
			goto exit_kernel;
		}
	}

	/* Create session loading thread. */
	ret = pthread_create(&load_session_thread, default_pthread_attr(),
			thread_load_session, load_info);
	if (ret) {
		errno = ret;
		PERROR("pthread_create load_session_thread");
		retval = -1;
		stop_threads();
		goto exit_load_session;
	}
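
	/*
	 * From this point on, the creation order above doubles as the
	 * teardown order: each pthread_create() failure jumps to a label
	 * that joins only the threads already started, and the join ladder
	 * below runs in roughly reverse creation order so that producers
	 * are stopped before the threads consuming from them.
	 */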
	/*
	 * This is where we start awaiting program completion (e.g. through
	 * signal that asks threads to teardown).
	 */

	ret = pthread_join(load_session_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join load_session_thread");
		retval = -1;
	}
exit_load_session:

	if (is_root && !opt_no_kernel) {
		ret = pthread_join(kernel_thread, &status);
		if (ret) {
			errno = ret;
			PERROR("pthread_join");
			retval = -1;
		}
	}
exit_kernel:

	ret = pthread_join(agent_reg_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join agent");
		retval = -1;
	}
exit_agent_reg:

	ret = pthread_join(apps_notify_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join apps notify");
		retval = -1;
	}
exit_apps_notify:

	ret = pthread_join(apps_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join apps");
		retval = -1;
	}
exit_apps:

	ret = pthread_join(reg_apps_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}
exit_reg_apps:

	/*
	 * Join dispatch thread after joining reg_apps_thread to ensure
	 * we don't leak applications in the queue.
	 */
	ret = pthread_join(dispatch_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}
exit_dispatch:

	ret = pthread_join(client_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}
exit_client:
exit_notification:

	ret = pthread_join(health_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join health thread");
		retval = -1;
	}
exit_health:
exit_init_data:
	/*
	 * Wait for all pending call_rcu work to complete before tearing
	 * down data structures. call_rcu worker may be trying to
	 * perform lookups in those structures.
	 */
	rcu_barrier();
	/*
	 * sessiond_cleanup() is called when no other thread is running, except
	 * the ht_cleanup thread, which is needed to destroy the hash tables.
	 */
	rcu_thread_online();
	sessiond_cleanup();
	/*
	 * Ensure all prior call_rcu are done. call_rcu callbacks may push
	 * hash tables to the ht_cleanup thread. Therefore, we ensure that
	 * the queue is empty before shutting down the clean-up thread.
	 */
	rcu_barrier();

	/*
	 * The teardown of the notification system is performed after the
	 * session daemon's teardown in order to allow it to be notified
	 * of the active session and channels at the moment of the teardown.
	 */
	if (notification_thread_handle) {
		if (notification_thread_running) {
			notification_thread_command_quit(
					notification_thread_handle);
			ret = pthread_join(notification_thread, &status);
			if (ret) {
				errno = ret;
				PERROR("pthread_join notification thread");
				retval = -1;
			}
		}
		notification_thread_handle_destroy(notification_thread_handle);
	}

	rcu_thread_offline();
	rcu_unregister_thread();

	ret = fini_ht_cleanup_thread(&ht_cleanup_thread);
	if (ret) {
		retval = -1;
	}
	lttng_pipe_destroy(ust32_channel_monitor_pipe);
	lttng_pipe_destroy(ust64_channel_monitor_pipe);
	lttng_pipe_destroy(kernel_channel_monitor_pipe);
exit_ht_cleanup:

	health_app_destroy(health_sessiond);
exit_health_sessiond_cleanup:
exit_create_run_as_worker_cleanup:

exit_options:
	sessiond_cleanup_options();

exit_set_signal_handler:
	if (!retval) {
		exit(EXIT_SUCCESS);
	} else {
		exit(EXIT_FAILURE);
	}
}