Fix: 32-bit x86 strict-aliasing warnings
[deliverable/lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1/*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#define _LGPL_SOURCE
23#include <sys/types.h>
24#include <sys/socket.h>
25#include <sys/mman.h>
26#include <sys/stat.h>
27#include <sys/types.h>
28#include <sys/wait.h>
29#include <fcntl.h>
30#include <unistd.h>
31#include <errno.h>
32#include <pthread.h>
33#include <semaphore.h>
34#include <time.h>
35#include <assert.h>
36#include <signal.h>
37#include <urcu/uatomic.h>
38#include <urcu/futex.h>
39#include <urcu/compiler.h>
40
41#include <lttng/ust-events.h>
42#include <lttng/ust-abi.h>
43#include <lttng/ust.h>
44#include <ust-comm.h>
45#include <usterr-signal-safe.h>
46#include <helper.h>
47#include "tracepoint-internal.h"
48#include "ltt-tracer-core.h"
49#include "compat.h"
50#include "../libringbuffer/tlsfixup.h"
51
52/*
54 * Has the lttng ust comm constructor been called?
54 */
55static int initialized;
56
57/*
58 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
59 * Held when handling a command, also held by fork() to deal with
60 * removal of threads, and by exit path.
61 */
62
63/* Should the ust comm thread quit? */
64static int lttng_ust_comm_should_quit;
65
66/*
67 * Wait for one of these before continuing to the main
68 * program:
69 * - the register_done message from sessiond daemon
70 * (will let the sessiond daemon enable sessions before main
71 * starts.)
72 * - sessiond daemon is not reachable.
73 * - timeout (ensuring applications are resilient to session
74 * daemon problems).
75 */
76static sem_t constructor_wait;
77/*
78 * Doing this for both the global and local sessiond.
79 */
80static int sem_count = { 2 };
81
82/*
83 * Counting nesting within lttng-ust. Used to ensure that calling fork()
84 * from liblttng-ust does not execute the pre/post fork handlers.
85 */
86static int __thread lttng_ust_nest_count;
87
88/*
89 * Info about socket and associated listener thread.
90 */
91struct sock_info {
92 const char *name;
93 pthread_t ust_listener; /* listener thread */
94 int root_handle;
95 int constructor_sem_posted;
96 int allowed;
97 int global;
98
99 char sock_path[PATH_MAX];
100 int socket;
101
102 char wait_shm_path[PATH_MAX];
103 char *wait_shm_mmap;
104};
105
106/* Socket from app (connect) to session daemon (listen) for communication */
107struct sock_info global_apps = {
108 .name = "global",
109 .global = 1,
110
111 .root_handle = -1,
112 .allowed = 1,
113
114 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
115 .socket = -1,
116
117 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
118};
119
120/* TODO: allow global_apps_sock_path override */
121
122struct sock_info local_apps = {
123 .name = "local",
124 .global = 0,
125 .root_handle = -1,
126 .allowed = 0, /* Check setuid bit first */
127
128 .socket = -1,
129};
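/*
 * local_apps.sock_path and local_apps.wait_shm_path are filled in
 * later by setup_local_apps(), from $HOME and the user id; .allowed is
 * also re-evaluated there (per-user tracing stays disabled for setuid
 * binaries).
 */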
130
131static int wait_poll_fallback;
132
133extern void ltt_ring_buffer_client_overwrite_init(void);
134extern void ltt_ring_buffer_client_discard_init(void);
135extern void ltt_ring_buffer_metadata_client_init(void);
136extern void ltt_ring_buffer_client_overwrite_exit(void);
137extern void ltt_ring_buffer_client_discard_exit(void);
138extern void ltt_ring_buffer_metadata_client_exit(void);
139
140/*
141 * Force a read (which implies TLS fixup for dlopen) of TLS variables.
142 */
143static
144void lttng_fixup_nest_count_tls(void)
145{
146 asm volatile ("" : : "m" (lttng_ust_nest_count));
147}
148
149static
150int setup_local_apps(void)
151{
152 const char *home_dir;
153 uid_t uid;
154
155 uid = getuid();
156 /*
157 * Disallow per-user tracing for setuid binaries.
158 */
159 if (uid != geteuid()) {
160 local_apps.allowed = 0;
161 return 0;
162 } else {
163 local_apps.allowed = 1;
164 }
165 home_dir = (const char *) getenv("HOME");
166 if (!home_dir)
167 return -ENOENT;
168 snprintf(local_apps.sock_path, PATH_MAX,
169 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
170 snprintf(local_apps.wait_shm_path, PATH_MAX,
171 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
172 return 0;
173}
174
175static
176int register_app_to_sessiond(int socket)
177{
178 ssize_t ret;
179 struct {
180 uint32_t major;
181 uint32_t minor;
182 pid_t pid;
183 pid_t ppid;
184 uid_t uid;
185 gid_t gid;
186 uint32_t bits_per_long;
187 char name[16]; /* process name */
188 } reg_msg;
189
190 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
191 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
192 reg_msg.pid = getpid();
193 reg_msg.ppid = getppid();
194 reg_msg.uid = getuid();
195 reg_msg.gid = getgid();
196 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
197 lttng_ust_getprocname(reg_msg.name);
198
199 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
200 if (ret >= 0 && ret != sizeof(reg_msg))
201 return -EIO;
202 return ret;
203}
204
205static
206int send_reply(int sock, struct ustcomm_ust_reply *lur)
207{
208 ssize_t len;
209
210 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
211 switch (len) {
212 case sizeof(*lur):
213 DBG("message successfully sent");
214 return 0;
215 case -1:
216 if (errno == ECONNRESET) {
217 printf("remote end closed connection\n");
218 return 0;
219 }
220 return -1;
221 default:
222 printf("incorrect message size: %zd\n", len);
223 return -1;
224 }
225}
226
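/*
 * Mark this socket's registration as done and decrement sem_count,
 * which starts at 2 (one for the global and one for the per-user
 * sessiond path). Each sock_info posts at most once, guarded by
 * constructor_sem_posted. Once sem_count reaches 0, post
 * constructor_wait so that lttng_ust_init() can proceed.
 */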
227static
228int handle_register_done(struct sock_info *sock_info)
229{
230 int ret;
231
232 if (sock_info->constructor_sem_posted)
233 return 0;
234 sock_info->constructor_sem_posted = 1;
235 if (uatomic_read(&sem_count) <= 0) {
236 return 0;
237 }
238 ret = uatomic_add_return(&sem_count, -1);
239 if (ret == 0) {
240 ret = sem_post(&constructor_wait);
241 assert(!ret);
242 }
243 return 0;
244}
245
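/*
 * Handle one command received from the sessiond on the given socket:
 * look up the objd ops for the handle carried in the message, run the
 * command under ust_lock(), send the reply, and, for stream, channel
 * and metadata commands, also send the shm and wait file descriptors
 * before closing our local copies of them.
 */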
246static
247int handle_message(struct sock_info *sock_info,
248 int sock, struct ustcomm_ust_msg *lum)
249{
250 int ret = 0;
251 const struct lttng_ust_objd_ops *ops;
252 struct ustcomm_ust_reply lur;
253 int shm_fd, wait_fd;
254 union ust_args args;
255 ssize_t len;
256
257 ust_lock();
258
259 memset(&lur, 0, sizeof(lur));
260
261 if (lttng_ust_comm_should_quit) {
262 ret = -EPERM;
263 goto end;
264 }
265
266 ops = objd_ops(lum->handle);
267 if (!ops) {
268 ret = -ENOENT;
269 goto end;
270 }
271
272 switch (lum->cmd) {
273 case LTTNG_UST_REGISTER_DONE:
274 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
275 ret = handle_register_done(sock_info);
276 else
277 ret = -EINVAL;
278 break;
279 case LTTNG_UST_RELEASE:
280 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
281 ret = -EPERM;
282 else
283 ret = lttng_ust_objd_unref(lum->handle);
284 break;
285 case LTTNG_UST_FILTER:
286 {
287 /* Receive filter data */
288 struct lttng_ust_filter_bytecode *bytecode;
289
290 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
291 ERR("Filter data size is too large: %u bytes\n",
292 lum->u.filter.data_size);
293 ret = -EINVAL;
294 goto error;
295 }
296 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
297 if (!bytecode) {
298 ret = -ENOMEM;
299 goto error;
300 }
301 len = ustcomm_recv_unix_sock(sock, bytecode->data,
302 lum->u.filter.data_size);
303 switch (len) {
304 case 0: /* orderly shutdown */
305 ret = 0;
306 free(bytecode);
307 goto error;
308 case -1:
309 DBG("Receive failed from lttng-sessiond with errno %d", errno);
310 if (errno == ECONNRESET) {
311 ERR("%s remote end closed connection\n", sock_info->name);
312 ret = -EINVAL;
313 free(bytecode);
314 goto error;
315 }
316 ret = -EINVAL;
317 goto end;
318 default:
319 if (len == lum->u.filter.data_size) {
320 DBG("filter data received\n");
321 break;
322 } else {
323 ERR("incorrect filter data message size: %zd\n", len);
324 ret = -EINVAL;
325 free(bytecode);
326 goto end;
327 }
328 }
329 bytecode->len = lum->u.filter.data_size;
330 bytecode->reloc_offset = lum->u.filter.reloc_offset;
331 if (ops->cmd) {
332 ret = ops->cmd(lum->handle, lum->cmd,
333 (unsigned long) bytecode,
334 &args);
335 if (ret) {
336 free(bytecode);
337 }
338 /* don't free bytecode if everything went fine. */
339 } else {
340 ret = -ENOSYS;
341 free(bytecode);
342 }
343 break;
344 }
345 default:
346 if (ops->cmd)
347 ret = ops->cmd(lum->handle, lum->cmd,
348 (unsigned long) &lum->u,
349 &args);
350 else
351 ret = -ENOSYS;
352 break;
353 }
354
355end:
356 lur.handle = lum->handle;
357 lur.cmd = lum->cmd;
358 lur.ret_val = ret;
359 if (ret >= 0) {
360 lur.ret_code = USTCOMM_OK;
361 } else {
362 //lur.ret_code = USTCOMM_SESSION_FAIL;
363 lur.ret_code = ret;
364 }
365 if (ret >= 0) {
366 switch (lum->cmd) {
367 case LTTNG_UST_STREAM:
368 /*
369 * Special-case reply to send stream info.
370 * Use lum.u output.
371 */
372 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
373 shm_fd = *args.stream.shm_fd;
374 wait_fd = *args.stream.wait_fd;
375 break;
376 case LTTNG_UST_METADATA:
377 case LTTNG_UST_CHANNEL:
378 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
379 shm_fd = *args.channel.shm_fd;
380 wait_fd = *args.channel.wait_fd;
381 break;
382 case LTTNG_UST_TRACER_VERSION:
383 lur.u.version = lum->u.version;
384 break;
385 case LTTNG_UST_TRACEPOINT_LIST_GET:
386 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
387 break;
388 }
389 }
390 ret = send_reply(sock, &lur);
391 if (ret < 0) {
392 perror("error sending reply");
393 goto error;
394 }
395
396 if ((lum->cmd == LTTNG_UST_STREAM
397 || lum->cmd == LTTNG_UST_CHANNEL
398 || lum->cmd == LTTNG_UST_METADATA)
399 && lur.ret_code == USTCOMM_OK) {
400 int sendret = 0;
401
402 /* we also need to send the file descriptors. */
403 ret = ustcomm_send_fds_unix_sock(sock,
404 &shm_fd, &shm_fd,
405 1, sizeof(int));
406 if (ret < 0) {
407 perror("send shm_fd");
408 sendret = ret;
409 }
410 /*
411 * The sessiond expects 2 file descriptors, even upon
412 * error.
413 */
414 ret = ustcomm_send_fds_unix_sock(sock,
415 &wait_fd, &wait_fd,
416 1, sizeof(int));
417 if (ret < 0) {
418 perror("send wait_fd");
419 goto error;
420 }
421 if (sendret) {
422 ret = sendret;
423 goto error;
424 }
425 }
426 /*
427 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
428 * after the reply.
429 */
430 if (lur.ret_code == USTCOMM_OK) {
431 switch (lum->cmd) {
432 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
433 len = ustcomm_send_unix_sock(sock,
434 &args.field_list.entry,
435 sizeof(args.field_list.entry));
436 if (len != sizeof(args.field_list.entry)) {
437 ret = -1;
438 goto error;
439 }
440 }
441 }
442 /*
443 * We still have the memory map reference, and the fds have been
444 * sent to the sessiond. We can therefore close those fds. Note
445 * that we keep the write side of the wait_fd open, but close
446 * the read side.
447 */
448 if (lur.ret_code == USTCOMM_OK) {
449 switch (lum->cmd) {
450 case LTTNG_UST_STREAM:
451 if (shm_fd >= 0) {
452 ret = close(shm_fd);
453 if (ret) {
454 PERROR("Error closing stream shm_fd");
455 }
456 *args.stream.shm_fd = -1;
457 }
458 if (wait_fd >= 0) {
459 ret = close(wait_fd);
460 if (ret) {
461 PERROR("Error closing stream wait_fd");
462 }
463 *args.stream.wait_fd = -1;
464 }
465 break;
466 case LTTNG_UST_METADATA:
467 case LTTNG_UST_CHANNEL:
468 if (shm_fd >= 0) {
469 ret = close(shm_fd);
470 if (ret) {
471 PERROR("Error closing channel shm_fd");
472 }
473 *args.channel.shm_fd = -1;
474 }
475 if (wait_fd >= 0) {
476 ret = close(wait_fd);
477 if (ret) {
478 PERROR("Error closing channel wait_fd");
479 }
480 *args.channel.wait_fd = -1;
481 }
482 break;
483 }
484 }
485
486error:
487 ust_unlock();
488 return ret;
489}
490
491static
492void cleanup_sock_info(struct sock_info *sock_info, int exiting)
493{
494 int ret;
495
496 if (sock_info->socket != -1) {
497 ret = ustcomm_close_unix_sock(sock_info->socket);
498 if (ret) {
499 ERR("Error closing apps socket");
500 }
501 sock_info->socket = -1;
502 }
503 if (sock_info->root_handle != -1) {
504 ret = lttng_ust_objd_unref(sock_info->root_handle);
505 if (ret) {
506 ERR("Error unref root handle");
507 }
508 sock_info->root_handle = -1;
509 }
510 sock_info->constructor_sem_posted = 0;
511 /*
512 * wait_shm_mmap is used by the listener threads outside of the
513 * ust lock, and we cannot join on those threads, so we cannot tear
514 * it down ourselves on exit; leave that to OS process teardown.
515 * Only unmap when not exiting (fork child, where the threads are gone).
516 */
517 if (!exiting && sock_info->wait_shm_mmap) {
518 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
519 if (ret) {
520 ERR("Error unmapping wait shm");
521 }
522 sock_info->wait_shm_mmap = NULL;
523 }
524}
525
526/*
527 * Using fork to set umask in the child process (not multi-thread safe).
528 * We deal with the shm_open vs ftruncate race (happening when the
529 * sessiond owns the shm and does not let everybody modify it, to ensure
530 * safety against shm_unlink) by simply letting the mmap fail and
531 * retrying after a few seconds.
532 * For global shm, everybody has rw access to it until the sessiond
533 * starts.
534 */
535static
536int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
537{
538 int wait_shm_fd, ret;
539 pid_t pid;
540
541 /*
542 * Try to open read-only.
543 */
544 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
545 if (wait_shm_fd >= 0) {
546 goto end;
547 } else if (wait_shm_fd < 0 && errno != ENOENT) {
548 /*
549 * Read-only open did not work, and it's not because the
550 * entry was not present. It's a failure that prohibits
551 * using shm.
552 */
553 ERR("Error opening shm %s", sock_info->wait_shm_path);
554 goto end;
555 }
556 /*
557 * If the open failed because the file did not exist, try
558 * creating it ourselves.
559 */
560 lttng_ust_nest_count++;
561 pid = fork();
562 lttng_ust_nest_count--;
563 if (pid > 0) {
564 int status;
565
566 /*
567 * Parent: wait for child to return, in which case the
568 * shared memory map will have been created.
569 */
570 pid = wait(&status);
571 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
572 wait_shm_fd = -1;
573 goto end;
574 }
575 /*
576 * Try to open read-only again after creation.
577 */
578 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
579 if (wait_shm_fd < 0) {
580 /*
581 * Read-only open did not work. It's a failure
582 * that prohibits using shm.
583 */
584 ERR("Error opening shm %s", sock_info->wait_shm_path);
585 goto end;
586 }
587 goto end;
588 } else if (pid == 0) {
589 int create_mode;
590
591 /* Child */
592 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
593 if (sock_info->global)
594 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
595 /*
596 * We're alone in a child process, so we can modify the
597 * process-wide umask.
598 */
599 umask(~create_mode);
600 /*
601 * Try creating shm (or get rw access).
602 * We don't do an exclusive open, because we allow other
603 * processes to create+ftruncate it concurrently.
604 */
605 wait_shm_fd = shm_open(sock_info->wait_shm_path,
606 O_RDWR | O_CREAT, create_mode);
607 if (wait_shm_fd >= 0) {
608 ret = ftruncate(wait_shm_fd, mmap_size);
609 if (ret) {
610 PERROR("ftruncate");
611 exit(EXIT_FAILURE);
612 }
613 exit(EXIT_SUCCESS);
614 }
615 /*
616 * For local shm, we need to have rw access to accept
617 * opening it: this means the local sessiond will be
618 * able to wake us up. For global shm, we open it even
619 * if rw access is not granted, because the root.root
620 * sessiond will be able to override all rights and wake
621 * us up.
622 */
623 if (!sock_info->global && errno != EACCES) {
624 ERR("Error opening shm %s", sock_info->wait_shm_path);
625 exit(EXIT_FAILURE);
626 }
627 /*
628 * The shm exists, but we cannot open it RW. Report
629 * success.
630 */
631 exit(EXIT_SUCCESS);
632 } else {
633 return -1;
634 }
635end:
636 if (wait_shm_fd >= 0 && !sock_info->global) {
637 struct stat statbuf;
638
639 /*
640 * Ensure that our user is the owner of the shm file for
641 * local shm. If we do not own the file, it means our
642 * sessiond will not have access to wake us up (there is
643 * probably a rogue process trying to fake our
644 * sessiond). Fallback to polling method in this case.
645 */
646 ret = fstat(wait_shm_fd, &statbuf);
647 if (ret) {
648 PERROR("fstat");
649 goto error_close;
650 }
651 if (statbuf.st_uid != getuid())
652 goto error_close;
653 }
654 return wait_shm_fd;
655
656error_close:
657 ret = close(wait_shm_fd);
658 if (ret) {
659 PERROR("Error closing fd");
660 }
661 return -1;
662}
663
664static
665char *get_map_shm(struct sock_info *sock_info)
666{
667 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
668 int wait_shm_fd, ret;
669 char *wait_shm_mmap;
670
671 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
672 if (wait_shm_fd < 0) {
673 goto error;
674 }
675 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
676 MAP_SHARED, wait_shm_fd, 0);
677 /* close shm fd immediately after taking the mmap reference */
678 ret = close(wait_shm_fd);
679 if (ret) {
680 PERROR("Error closing fd");
681 }
682 if (wait_shm_mmap == MAP_FAILED) {
683 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
684 goto error;
685 }
686 return wait_shm_mmap;
687
688error:
689 return NULL;
690}
691
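/*
 * Block the listener until the sessiond signals availability by
 * setting the 32-bit word in the wait shm to a non-zero value and
 * waking the futex. If FUTEX_WAIT fails with EFAULT (a limitation of
 * some kernels on read-only mappings), set wait_poll_fallback so the
 * caller's retry loop degrades to periodic polling.
 */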
692static
693void wait_for_sessiond(struct sock_info *sock_info)
694{
695 int ret;
696
697 ust_lock();
698 if (lttng_ust_comm_should_quit) {
699 goto quit;
700 }
701 if (wait_poll_fallback) {
702 goto error;
703 }
704 if (!sock_info->wait_shm_mmap) {
705 sock_info->wait_shm_mmap = get_map_shm(sock_info);
706 if (!sock_info->wait_shm_mmap)
707 goto error;
708 }
709 ust_unlock();
710
711 DBG("Waiting for %s apps sessiond", sock_info->name);
712 /* Wait for futex wakeup */
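	/*
	 * Note on the (int32_t *) casts below: the wait page is mapped
	 * as a char *, but the futex word must be accessed as a 32-bit
	 * integer. Going through an explicit int32_t pointer keeps the
	 * access size right on both 32-bit and 64-bit builds and, per
	 * this commit's subject, is presumably what silences the
	 * strict-aliasing warnings previously seen on 32-bit x86.
	 */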
713 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
714 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
715 FUTEX_WAIT, 0, NULL, NULL, 0);
716 if (ret < 0) {
717 if (errno == EFAULT) {
718 wait_poll_fallback = 1;
719 DBG(
720"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
721"do not support FUTEX_WAKE on read-only memory mappings correctly. "
722"Please upgrade your kernel "
723"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
724"mainline). LTTng-UST will use polling mode fallback.");
725 if (ust_debug())
726 PERROR("futex");
727 }
728 }
729 }
730 return;
731
732quit:
733 ust_unlock();
734 return;
735
736error:
737 ust_unlock();
738 return;
739}
740
741/*
742 * This thread does not allocate any resource, except within
743 * handle_message, under mutex protection. This mutex protects against
744 * fork and exit.
745 * It also allocates resources when connecting the socket, which is
746 * likewise protected by the mutex.
747 */
748static
749void *ust_listener_thread(void *arg)
750{
751 struct sock_info *sock_info = arg;
752 int sock, ret, prev_connect_failed = 0, has_waited = 0;
753
754 /* Restart trying to connect to the session daemon */
755restart:
756 if (prev_connect_failed) {
757 /* Wait for sessiond availability with futex on the wait shm */
758 wait_for_sessiond(sock_info);
759 if (has_waited) {
760 has_waited = 0;
761 /*
762 * Sleep for 5 seconds before retrying after a
763 * sequence of failure / wait / failure. This
764 * deals with a killed or broken session daemon.
765 */
766 sleep(5);
767 }
768 has_waited = 1;
769 prev_connect_failed = 0;
770 }
771 ust_lock();
772
773 if (lttng_ust_comm_should_quit) {
774 ust_unlock();
775 goto quit;
776 }
777
778 if (sock_info->socket != -1) {
779 ret = ustcomm_close_unix_sock(sock_info->socket);
780 if (ret) {
781 ERR("Error closing %s apps socket", sock_info->name);
782 }
783 sock_info->socket = -1;
784 }
785
786 /* Register */
787 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
788 if (ret < 0) {
789 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
790 prev_connect_failed = 1;
791 /*
792 * If we cannot find the sessiond daemon, don't delay
793 * constructor execution.
794 */
795 ret = handle_register_done(sock_info);
796 assert(!ret);
797 ust_unlock();
798 goto restart;
799 }
800
801 sock_info->socket = sock = ret;
802
803 /*
804 * Create only one root handle per listener thread for the whole
805 * process lifetime.
806 */
807 if (sock_info->root_handle == -1) {
808 ret = lttng_abi_create_root_handle();
809 if (ret < 0) {
810 ERR("Error creating root handle");
811 ust_unlock();
812 goto quit;
813 }
814 sock_info->root_handle = ret;
815 }
816
817 ret = register_app_to_sessiond(sock);
818 if (ret < 0) {
819 ERR("Error registering to %s apps socket", sock_info->name);
820 prev_connect_failed = 1;
821 /*
822 * If we cannot register to the sessiond daemon, don't
823 * delay constructor execution.
824 */
825 ret = handle_register_done(sock_info);
826 assert(!ret);
827 ust_unlock();
828 goto restart;
829 }
830 ust_unlock();
831
832 for (;;) {
833 ssize_t len;
834 struct ustcomm_ust_msg lum;
835
836 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
837 switch (len) {
838 case 0: /* orderly shutdown */
839 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
840 ust_lock();
841 /*
842 * Either sessiond has shut down or refused us by closing the socket.
843 * In either case, we don't want to delay constructor execution,
844 * and we need to wait before retrying.
845 */
846 prev_connect_failed = 1;
847 /*
848 * If we cannot register to the sessiond daemon, don't
849 * delay constructor execution.
850 */
851 ret = handle_register_done(sock_info);
852 assert(!ret);
853 ust_unlock();
854 goto end;
855 case sizeof(lum):
856 DBG("message received\n");
857 ret = handle_message(sock_info, sock, &lum);
858 if (ret < 0) {
859 ERR("Error handling message for %s socket", sock_info->name);
860 }
861 continue;
862 case -1:
863 DBG("Receive failed from lttng-sessiond with errno %d", errno);
864 if (errno == ECONNRESET) {
865 ERR("%s remote end closed connection\n", sock_info->name);
866 goto end;
867 }
868 goto end;
869 default:
870 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
871 continue;
872 }
873
874 }
875end:
876 goto restart; /* try to reconnect */
877quit:
878 return NULL;
879}
880
881/*
882 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
883 */
884static
885int get_timeout(struct timespec *constructor_timeout)
886{
887 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
888 char *str_delay;
889 int ret;
890
891 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
892 if (str_delay) {
893 constructor_delay_ms = strtol(str_delay, NULL, 10);
894 }
895
896 switch (constructor_delay_ms) {
897 case -1:/* fall-through */
898 case 0:
899 return constructor_delay_ms;
900 default:
901 break;
902 }
903
904 /*
905 * If we are unable to find the current time, don't wait.
906 */
907 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
908 if (ret) {
909 return -1;
910 }
911 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
912 constructor_timeout->tv_nsec +=
913 (constructor_delay_ms % 1000UL) * 1000000UL;
914 if (constructor_timeout->tv_nsec >= 1000000000UL) {
915 constructor_timeout->tv_sec++;
916 constructor_timeout->tv_nsec -= 1000000000UL;
917 }
918 return 1;
919}
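/*
 * Worked example (hypothetical value): with LTTNG_UST_REGISTER_TIMEOUT
 * set to 1500, constructor_delay_ms is 1500, so get_timeout() adds
 * 1 second plus 500000000 nanoseconds to the current CLOCK_REALTIME
 * time, normalizes tv_nsec if it overflows, and returns 1 (timeout
 * wait). A value of -1 means wait forever and 0 means don't wait.
 */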
920
921/*
922 * sessiond monitoring thread: monitor presence of global and per-user
923 * sessiond by polling the application common named pipe.
924 */
925/* TODO */
926
927void __attribute__((constructor)) lttng_ust_init(void)
928{
929 struct timespec constructor_timeout;
930 sigset_t sig_all_blocked, orig_parent_mask;
931 int timeout_mode;
932 int ret;
933
934 if (uatomic_xchg(&initialized, 1) == 1)
935 return;
936
937 /*
938 * Fixup the interdependency between the TLS fixup mutex (which
939 * happens to be the dynamic linker mutex) and ust_lock, by doing
940 * the TLS fixups here, before the ust lock is ever taken.
941 */
942 lttng_fixup_event_tls();
943 lttng_fixup_ringbuffer_tls();
944 lttng_fixup_vtid_tls();
945 lttng_fixup_nest_count_tls();
946
947 /*
948 * We want precise control over the order in which we construct
949 * our sub-libraries vs starting to receive commands from
950 * sessiond (otherwise leading to errors when trying to create
951 * sessions before the init functions are completed).
952 */
953 init_usterr();
954 init_tracepoint();
955 ltt_ring_buffer_metadata_client_init();
956 ltt_ring_buffer_client_overwrite_init();
957 ltt_ring_buffer_client_discard_init();
958
959 timeout_mode = get_timeout(&constructor_timeout);
960
961 ret = sem_init(&constructor_wait, 0, 0);
962 assert(!ret);
963
964 ret = setup_local_apps();
965 if (ret) {
966 ERR("Error setting up local apps");
967 }
968
969 /* A new thread created by pthread_create inherits the signal mask
970 * from the parent. To avoid any signal being received by the
971 * listener thread, we block all signals temporarily in the parent,
972 * while we create the listener thread.
973 */
974 sigfillset(&sig_all_blocked);
975 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
976 if (ret) {
977 ERR("pthread_sigmask: %s", strerror(ret));
978 }
979
980 ret = pthread_create(&global_apps.ust_listener, NULL,
981 ust_listener_thread, &global_apps);
982 if (ret) {
983 ERR("pthread_create global: %s", strerror(ret));
984 }
985 if (local_apps.allowed) {
986 ret = pthread_create(&local_apps.ust_listener, NULL,
987 ust_listener_thread, &local_apps);
988 if (ret) {
989 ERR("pthread_create local: %s", strerror(ret));
990 }
991 } else {
992 handle_register_done(&local_apps);
993 }
994
995 /* Restore original signal mask in parent */
996 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
997 if (ret) {
998 ERR("pthread_sigmask: %s", strerror(ret));
999 }
1000
1001 switch (timeout_mode) {
1002 case 1: /* timeout wait */
1003 do {
1004 ret = sem_timedwait(&constructor_wait,
1005 &constructor_timeout);
1006 } while (ret < 0 && errno == EINTR);
1007 if (ret < 0 && errno == ETIMEDOUT) {
1008 ERR("Timed out waiting for ltt-sessiond");
1009 } else {
1010 assert(!ret);
1011 }
1012 break;
1013 case -1:/* wait forever */
1014 do {
1015 ret = sem_wait(&constructor_wait);
1016 } while (ret < 0 && errno == EINTR);
1017 assert(!ret);
1018 break;
1019 case 0: /* don't wait */
1020 break;
1021 }
1022}
1023
1024static
1025void lttng_ust_cleanup(int exiting)
1026{
1027 cleanup_sock_info(&global_apps, exiting);
1028 if (local_apps.allowed) {
1029 cleanup_sock_info(&local_apps, exiting);
1030 }
1031 /*
1032 * The teardowns performed in this function all affect data
1033 * structures accessed under the UST lock by the listener threads.
1034 * This lock, along with the lttng_ust_comm_should_quit flag,
1035 * ensures that none of those threads are accessing this data at
1036 * this point.
1037 */
1038 lttng_ust_abi_exit();
1039 lttng_ust_events_exit();
1040 ltt_ring_buffer_client_discard_exit();
1041 ltt_ring_buffer_client_overwrite_exit();
1042 ltt_ring_buffer_metadata_client_exit();
1043 exit_tracepoint();
1044 if (!exiting) {
1045 /* Reinitialize values for fork */
1046 sem_count = 2;
1047 lttng_ust_comm_should_quit = 0;
1048 initialized = 0;
1049 }
1050}
1051
1052void __attribute__((destructor)) lttng_ust_exit(void)
1053{
1054 int ret;
1055
1056 /*
1057 * Using pthread_cancel here because:
1058 * A) we don't want to hang application teardown.
1059 * B) the thread is not allocating any resource.
1060 */
1061
1062 /*
1063 * Ask the communication threads to quit. Synchronize with
1064 * mutexes to ensure it is not in a mutex critical section when
1065 * pthread_cancel is later called.
1066 */
1067 ust_lock();
1068 lttng_ust_comm_should_quit = 1;
1069 ust_unlock();
1070
1071 /* cancel threads */
1072 ret = pthread_cancel(global_apps.ust_listener);
1073 if (ret) {
1074 ERR("Error cancelling global ust listener thread: %s",
1075 strerror(ret));
1076 }
1077 if (local_apps.allowed) {
1078 ret = pthread_cancel(local_apps.ust_listener);
1079 if (ret) {
1080 ERR("Error cancelling local ust listener thread: %s",
1081 strerror(ret));
1082 }
1083 }
1084 /*
1085 * Do NOT join threads: use of sys_futex makes it impossible to
1086 * join the threads without using async-cancel, but async-cancel
1087 * is delivered by a signal, which could hit the target thread
1088 * anywhere in its code path, including while the ust_lock() is
1089 * held, causing a deadlock for the other thread. Let the OS
1090 * clean up the threads if they are stalled in a syscall.
1091 */
1092 lttng_ust_cleanup(1);
1093}
1094
1095/*
1096 * We exclude the worker threads across fork and clone (except
1097 * CLONE_VM), because these system calls only keep the forking thread
1098 * running in the child. Therefore, we don't want to call fork or clone
1099 * in the middle of a tracepoint or ust tracing state modification.
1100 * Holding this mutex protects these structures across fork and clone.
1101 */
1102void ust_before_fork(sigset_t *save_sigset)
1103{
1104 /*
1105 * Disable signals. This is to prevent the child from intervening
1106 * before it is properly set up for tracing. It is safer to
1107 * disable all signals, because then we know we are not breaking
1108 * anything by restoring the original mask.
1109 */
1110 sigset_t all_sigs;
1111 int ret;
1112
1113 if (lttng_ust_nest_count)
1114 return;
1115 /* Disable signals */
1116 sigfillset(&all_sigs);
1117 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1118 if (ret == -1) {
1119 PERROR("sigprocmask");
1120 }
1121 ust_lock();
1122 rcu_bp_before_fork();
1123}
1124
1125static void ust_after_fork_common(sigset_t *restore_sigset)
1126{
1127 int ret;
1128
1129 DBG("process %d", getpid());
1130 ust_unlock();
1131 /* Restore signals */
1132 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1133 if (ret == -1) {
1134 PERROR("sigprocmask");
1135 }
1136}
1137
1138void ust_after_fork_parent(sigset_t *restore_sigset)
1139{
1140 if (lttng_ust_nest_count)
1141 return;
1142 DBG("process %d", getpid());
1143 rcu_bp_after_fork_parent();
1144 /* Release mutexes and reenable signals */
1145 ust_after_fork_common(restore_sigset);
1146}
1147
1148/*
1149 * After fork, in the child, we need to clean up all the leftover state,
1150 * except the worker threads, which already magically disappeared thanks
1151 * to the weird Linux fork semantics. After tidying up, we call
1152 * lttng_ust_init() again to start over as a new PID.
1153 *
1154 * This is meant for fork() calls that do tracing in the child between
1155 * the fork and the following exec call (if there is any).
1156 */
1157void ust_after_fork_child(sigset_t *restore_sigset)
1158{
1159 if (lttng_ust_nest_count)
1160 return;
1161 DBG("process %d", getpid());
1162 /* Release urcu mutexes */
1163 rcu_bp_after_fork_child();
1164 lttng_ust_cleanup(0);
1165 lttng_context_vtid_reset();
1166 /* Release mutexes and reenable signals */
1167 ust_after_fork_common(restore_sigset);
1168 lttng_ust_init();
1169}
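/*
 * Usage sketch (illustrative only, not part of this file): the fork
 * handlers above are meant to be driven by a fork() wrapper, such as
 * the one provided by liblttng-ust-fork, roughly like this:
 *
 *	sigset_t sigset;
 *	pid_t pid;
 *
 *	ust_before_fork(&sigset);
 *	pid = fork();
 *	if (pid == 0)
 *		ust_after_fork_child(&sigset);
 *	else
 *		ust_after_fork_parent(&sigset);
 */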