Fix: only print event errors every 1048576 hits
liblttng-ust/lttng-ust-comm.c (deliverable/lttng-ust.git)
/*
 * lttng-ust-comm.c
 *
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/prctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <assert.h>
#include <signal.h>
#include <urcu/uatomic.h>
#include <urcu/futex.h>
#include <urcu/compiler.h>

#include <lttng/ust-events.h>
#include <lttng/ust-abi.h>
#include <lttng/ust.h>
#include <ust-comm.h>
#include <usterr-signal-safe.h>
#include "tracepoint-internal.h"
#include "ltt-tracer-core.h"
/*
 * Has the lttng-ust comm constructor been called?
 */
static int initialized;

/*
 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
 * Held when handling a command, also held by fork() to deal with
 * removal of threads, and by exit path.
 */

/* Should the ust comm thread quit? */
static int lttng_ust_comm_should_quit;

/*
 * Wait for either of these before continuing to the main
 * program:
 * - the register_done message from sessiond daemon
 *   (will let the sessiond daemon enable sessions before main
 *   starts.)
 * - sessiond daemon is not reachable.
 * - timeout (ensuring applications are resilient to session
 *   daemon problems).
 */
static sem_t constructor_wait;
/*
 * One count for the global sessiond, one for the local (per-user)
 * sessiond.
 */
static int sem_count = { 2 };

/*
 * Info about socket and associated listener thread.
 */
struct sock_info {
	const char *name;
	pthread_t ust_listener;	/* listener thread */
	int root_handle;
	int constructor_sem_posted;
	int allowed;
	int global;

	char sock_path[PATH_MAX];
	int socket;

	char wait_shm_path[PATH_MAX];
	char *wait_shm_mmap;
};
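/*
 * Two sock_info instances live for the whole process: one for the
 * system-wide ("global") sessiond and one for the per-user ("local")
 * sessiond. Each has its own listener thread, registration socket and
 * wait shm, initialized below.
 */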

/* Socket from app (connect) to session daemon (listen) for communication */
struct sock_info global_apps = {
	.name = "global",
	.global = 1,

	.root_handle = -1,
	.allowed = 1,

	.sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
	.socket = -1,

	.wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
};

/* TODO: allow global_apps_sock_path override */

struct sock_info local_apps = {
	.name = "local",
	.global = 0,
	.root_handle = -1,
	.allowed = 0,	/* Check setuid bit first */

	.socket = -1,
};

static int wait_poll_fallback;

extern void ltt_ring_buffer_client_overwrite_init(void);
extern void ltt_ring_buffer_client_discard_init(void);
extern void ltt_ring_buffer_metadata_client_init(void);
extern void ltt_ring_buffer_client_overwrite_exit(void);
extern void ltt_ring_buffer_client_discard_exit(void);
extern void ltt_ring_buffer_metadata_client_exit(void);

static
int setup_local_apps(void)
{
	const char *home_dir;
	uid_t uid;

	uid = getuid();
	/*
	 * Disallow per-user tracing for setuid binaries.
	 */
	if (uid != geteuid()) {
		local_apps.allowed = 0;
		return 0;
	} else {
		local_apps.allowed = 1;
	}
	home_dir = (const char *) getenv("HOME");
	if (!home_dir)
		return -ENOENT;
	snprintf(local_apps.sock_path, PATH_MAX,
		DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
	snprintf(local_apps.wait_shm_path, PATH_MAX,
		DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
	return 0;
}
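/*
 * Note: sock_path is built by interpolating $HOME into
 * DEFAULT_HOME_APPS_UNIX_SOCK and wait_shm_path by interpolating the
 * uid into DEFAULT_HOME_APPS_WAIT_SHM_PATH, so every user ends up with
 * a private registration socket and wait shm object.
 */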

static
int register_app_to_sessiond(int socket)
{
	ssize_t ret;
	int prctl_ret;
	struct {
		uint32_t major;
		uint32_t minor;
		pid_t pid;
		pid_t ppid;
		uid_t uid;
		gid_t gid;
		uint32_t bits_per_long;
		char name[16];	/* process name */
	} reg_msg;

	reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
	reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
	reg_msg.pid = getpid();
	reg_msg.ppid = getppid();
	reg_msg.uid = getuid();
	reg_msg.gid = getgid();
	reg_msg.bits_per_long = CAA_BITS_PER_LONG;
	prctl_ret = prctl(PR_GET_NAME, (unsigned long) reg_msg.name, 0, 0, 0);
	if (prctl_ret) {
		ERR("Error executing prctl");
		return -errno;
	}

	ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
	if (ret >= 0 && ret != sizeof(reg_msg))
		return -EIO;
	return ret;
}
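/*
 * The registration message above goes out as a raw C struct on the
 * UNIX socket, so both ends must agree on field order, integer widths
 * and padding; the major/minor fields carry the
 * LTTNG_UST_COMM_VERSION_* numbers so that an incompatible sessiond
 * can be detected, and bits_per_long lets the sessiond tell 32-bit
 * applications apart from 64-bit ones.
 */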

static
int send_reply(int sock, struct ustcomm_ust_reply *lur)
{
	ssize_t len;

	len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
	switch (len) {
	case sizeof(*lur):
		DBG("message successfully sent");
		return 0;
	case -1:
		if (errno == ECONNRESET) {
			printf("remote end closed connection\n");
			return 0;
		}
		return -1;
	default:
		printf("incorrect message size: %zd\n", len);
		return -1;
	}
}

static
int handle_register_done(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->constructor_sem_posted)
		return 0;
	sock_info->constructor_sem_posted = 1;
	if (uatomic_read(&sem_count) <= 0) {
		return 0;
	}
	ret = uatomic_add_return(&sem_count, -1);
	if (ret == 0) {
		ret = sem_post(&constructor_wait);
		assert(!ret);
	}
	return 0;
}
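/*
 * Worked example of the counting above: sem_count starts at 2 (see its
 * initializer). The first sessiond to finish registration (or to be
 * found unreachable) brings it to 1, the second brings it to 0, and
 * only the decrement reaching 0 posts constructor_wait, which releases
 * lttng_ust_init(). constructor_sem_posted makes sure each sock_info
 * contributes at most one decrement, even if this path is taken more
 * than once for the same sessiond.
 */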

static
int handle_message(struct sock_info *sock_info,
		int sock, struct ustcomm_ust_msg *lum)
{
	int ret = 0;
	const struct lttng_ust_objd_ops *ops;
	struct ustcomm_ust_reply lur;
	int shm_fd, wait_fd;
	union ust_args args;

	ust_lock();

	memset(&lur, 0, sizeof(lur));

	if (lttng_ust_comm_should_quit) {
		ret = -EPERM;
		goto end;
	}

	ops = objd_ops(lum->handle);
	if (!ops) {
		ret = -ENOENT;
		goto end;
	}

	switch (lum->cmd) {
	case LTTNG_UST_REGISTER_DONE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = handle_register_done(sock_info);
		else
			ret = -EINVAL;
		break;
	case LTTNG_UST_RELEASE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = -EPERM;
		else
			ret = lttng_ust_objd_unref(lum->handle);
		break;
	default:
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u,
					&args);
		else
			ret = -ENOSYS;
		break;
	}

end:
	lur.handle = lum->handle;
	lur.cmd = lum->cmd;
	lur.ret_val = ret;
	if (ret >= 0) {
		lur.ret_code = USTCOMM_OK;
	} else {
		//lur.ret_code = USTCOMM_SESSION_FAIL;
		lur.ret_code = ret;
	}
	if (ret >= 0) {
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			/*
			 * Special-case reply to send stream info.
			 * Use lum.u output.
			 */
			lur.u.stream.memory_map_size = *args.stream.memory_map_size;
			shm_fd = *args.stream.shm_fd;
			wait_fd = *args.stream.wait_fd;
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			lur.u.channel.memory_map_size = *args.channel.memory_map_size;
			shm_fd = *args.channel.shm_fd;
			wait_fd = *args.channel.wait_fd;
			break;
		case LTTNG_UST_TRACER_VERSION:
			lur.u.version = lum->u.version;
			break;
		case LTTNG_UST_TRACEPOINT_LIST_GET:
			memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
			break;
		}
	}
	ret = send_reply(sock, &lur);
	if (ret < 0) {
		perror("error sending reply");
		goto error;
	}

	if ((lum->cmd == LTTNG_UST_STREAM
			|| lum->cmd == LTTNG_UST_CHANNEL
			|| lum->cmd == LTTNG_UST_METADATA)
			&& lur.ret_code == USTCOMM_OK) {
		/* we also need to send the file descriptors. */
		ret = ustcomm_send_fds_unix_sock(sock,
			&shm_fd, &shm_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send shm_fd");
			goto error;
		}
		ret = ustcomm_send_fds_unix_sock(sock,
			&wait_fd, &wait_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send wait_fd");
			goto error;
		}
	}
	/*
	 * We still have the memory map reference, and the fds have been
	 * sent to the sessiond. We can therefore close those fds. Note
	 * that we keep the write side of the wait_fd open, but close
	 * the read side.
	 */
	if (lur.ret_code == USTCOMM_OK) {
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing stream shm_fd");
				}
				*args.stream.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing stream wait_fd");
				}
				*args.stream.wait_fd = -1;
			}
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing channel shm_fd");
				}
				*args.channel.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing channel wait_fd");
				}
				*args.channel.wait_fd = -1;
			}
			break;
		}
	}

error:
	ust_unlock();
	return ret;
}
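/*
 * Reply protocol recap: every command is answered with one
 * struct ustcomm_ust_reply. For successful STREAM, CHANNEL and
 * METADATA commands, the reply is followed by two SCM_RIGHTS messages
 * carrying shm_fd and then wait_fd, in that order. A peer reading this
 * socket therefore has to consume the reply first and only expect the
 * two descriptors when ret_code is USTCOMM_OK for one of those
 * commands; anything else desynchronizes the stream.
 */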

static
void cleanup_sock_info(struct sock_info *sock_info, int exiting)
{
	int ret;

	if (sock_info->socket != -1) {
		ret = close(sock_info->socket);
		if (ret) {
			ERR("Error closing apps socket");
		}
		sock_info->socket = -1;
	}
	if (sock_info->root_handle != -1) {
		ret = lttng_ust_objd_unref(sock_info->root_handle);
		if (ret) {
			ERR("Error unref root handle");
		}
		sock_info->root_handle = -1;
	}
	sock_info->constructor_sem_posted = 0;
	/*
	 * wait_shm_mmap is used by listener threads outside of the
	 * ust lock, so we cannot tear it down ourselves, because we
	 * cannot join on these threads. Leave this task to the OS
	 * process exit.
	 */
	if (!exiting && sock_info->wait_shm_mmap) {
		ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
		if (ret) {
			ERR("Error unmapping wait shm");
		}
		sock_info->wait_shm_mmap = NULL;
	}
}

/*
 * Using fork to set umask in the child process (not multi-thread safe).
 * We deal with the shm_open vs ftruncate race (happening when the
 * sessiond owns the shm and does not let everybody modify it, to ensure
 * safety against shm_unlink) by simply letting the mmap fail and
 * retrying after a few seconds.
 * For global shm, everybody has rw access to it until the sessiond
 * starts.
 */
static
int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
{
	int wait_shm_fd, ret;
	pid_t pid;

	/*
	 * Try to open read-only.
	 */
	wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
	if (wait_shm_fd >= 0) {
		goto end;
	} else if (wait_shm_fd < 0 && errno != ENOENT) {
		/*
		 * Read-only open did not work, and it's not because the
		 * entry was not present. It's a failure that prohibits
		 * using shm.
		 */
		ERR("Error opening shm %s", sock_info->wait_shm_path);
		goto end;
	}
	/*
	 * If the open failed because the file did not exist, try
	 * creating it ourselves.
	 */
	pid = fork();
	if (pid > 0) {
		int status;

		/*
		 * Parent: wait for child to return, in which case the
		 * shared memory map will have been created.
		 */
		pid = wait(&status);
		if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
			wait_shm_fd = -1;
			goto end;
		}
		/*
		 * Try to open read-only again after creation.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
		if (wait_shm_fd < 0) {
			/*
			 * Read-only open did not work. It's a failure
			 * that prohibits using shm.
			 */
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			goto end;
		}
		goto end;
	} else if (pid == 0) {
		int create_mode;

		/* Child */
		create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
		if (sock_info->global)
			create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
		/*
		 * We're alone in a child process, so we can modify the
		 * process-wide umask.
		 */
		umask(~create_mode);
		/*
		 * Try creating shm (or get rw access).
		 * We don't do an exclusive open, because we allow other
		 * processes to create+ftruncate it concurrently.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path,
				O_RDWR | O_CREAT, create_mode);
		if (wait_shm_fd >= 0) {
			ret = ftruncate(wait_shm_fd, mmap_size);
			if (ret) {
				PERROR("ftruncate");
				exit(EXIT_FAILURE);
			}
			exit(EXIT_SUCCESS);
		}
		/*
		 * For local shm, we need to have rw access to accept
		 * opening it: this means the local sessiond will be
		 * able to wake us up. For global shm, we open it even
		 * if rw access is not granted, because the root.root
		 * sessiond will be able to override all rights and wake
		 * us up.
		 */
		if (!sock_info->global && errno != EACCES) {
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			exit(EXIT_FAILURE);
		}
		/*
		 * The shm exists, but we cannot open it RW. Report
		 * success.
		 */
		exit(EXIT_SUCCESS);
	} else {
		return -1;
	}
end:
	if (wait_shm_fd >= 0 && !sock_info->global) {
		struct stat statbuf;

		/*
		 * Ensure that our user is the owner of the shm file for
		 * local shm. If we do not own the file, it means our
		 * sessiond will not have access to wake us up (there is
		 * probably a rogue process trying to fake our
		 * sessiond). Fallback to polling method in this case.
		 */
		ret = fstat(wait_shm_fd, &statbuf);
		if (ret) {
			PERROR("fstat");
			goto error_close;
		}
		if (statbuf.st_uid != getuid())
			goto error_close;
	}
	return wait_shm_fd;

error_close:
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	return -1;
}
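/*
 * Resulting permissions for the wait shm created by the child above:
 * umask(~create_mode) followed by shm_open(..., create_mode) yields a
 * mode of exactly create_mode, i.e. 0640 (rw-r-----) for the per-user
 * shm and 0666 (rw-rw-rw-) for the global shm, the latter staying
 * world-writable only until a root sessiond takes it over.
 */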

static
char *get_map_shm(struct sock_info *sock_info)
{
	size_t mmap_size = sysconf(_SC_PAGE_SIZE);
	int wait_shm_fd, ret;
	char *wait_shm_mmap;

	wait_shm_fd = get_wait_shm(sock_info, mmap_size);
	if (wait_shm_fd < 0) {
		goto error;
	}
	wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
		MAP_SHARED, wait_shm_fd, 0);
	/* close shm fd immediately after taking the mmap reference */
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	if (wait_shm_mmap == MAP_FAILED) {
		DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
		goto error;
	}
	return wait_shm_mmap;

error:
	return NULL;
}

static
void wait_for_sessiond(struct sock_info *sock_info)
{
	int ret;

	ust_lock();
	if (lttng_ust_comm_should_quit) {
		goto quit;
	}
	if (wait_poll_fallback) {
		goto error;
	}
	if (!sock_info->wait_shm_mmap) {
		sock_info->wait_shm_mmap = get_map_shm(sock_info);
		if (!sock_info->wait_shm_mmap)
			goto error;
	}
	ust_unlock();

	DBG("Waiting for %s apps sessiond", sock_info->name);
	/* Wait for futex wakeup */
	if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
		ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
			FUTEX_WAIT, 0, NULL, NULL, 0);
		if (ret < 0) {
			if (errno == EFAULT) {
				wait_poll_fallback = 1;
				DBG(
"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
"do not support FUTEX_WAKE on read-only memory mappings correctly. "
"Please upgrade your kernel "
"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
"mainline). LTTng-UST will use polling mode fallback.");
				if (ust_debug())
					PERROR("futex");
			}
		}
	}
	return;

quit:
	ust_unlock();
	return;

error:
	ust_unlock();
	return;
}
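/*
 * The wait above relies on the 32-bit futex word at the start of the
 * wait shm: applications sleep in FUTEX_WAIT while the word reads 0,
 * and the sessiond is expected to flip it and wake all waiters. A
 * minimal sketch of that waker side (an assumption for illustration,
 * not code taken from the sessiond):
 *
 *	int32_t *wait_flag = (int32_t *) shm_base;
 *
 *	uatomic_set(wait_flag, 1);
 *	futex_async(wait_flag, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
 *
 * Checking the word with uatomic_read() before calling futex_async()
 * here is what keeps a waiter that races with the wake from sleeping
 * forever.
 */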

/*
 * This thread does not allocate any resource, except within
 * handle_message, within mutex protection. This mutex protects against
 * fork and exit.
 * The other moment it allocates resources is at socket connection, which
 * is also protected by the mutex.
 */
static
void *ust_listener_thread(void *arg)
{
	struct sock_info *sock_info = arg;
	int sock, ret, prev_connect_failed = 0, has_waited = 0;

	/* Restart trying to connect to the session daemon */
restart:
	if (prev_connect_failed) {
		/* Wait for sessiond availability (futex on the wait shm) */
		wait_for_sessiond(sock_info);
		if (has_waited) {
			has_waited = 0;
			/*
			 * Sleep for 5 seconds before retrying after a
			 * sequence of failure / wait / failure. This
			 * deals with a killed or broken session daemon.
			 */
			sleep(5);
		}
		has_waited = 1;
		prev_connect_failed = 0;
	}
	ust_lock();

	if (lttng_ust_comm_should_quit) {
		ust_unlock();
		goto quit;
	}

	if (sock_info->socket != -1) {
		ret = close(sock_info->socket);
		if (ret) {
			ERR("Error closing %s apps socket", sock_info->name);
		}
		sock_info->socket = -1;
	}

	/* Register */
	ret = ustcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}

	sock_info->socket = sock = ret;

	/*
	 * Create only one root handle per listener thread for the whole
	 * process lifetime.
	 */
	if (sock_info->root_handle == -1) {
		ret = lttng_abi_create_root_handle();
		if (ret < 0) {
			ERR("Error creating root handle");
			ust_unlock();
			goto quit;
		}
		sock_info->root_handle = ret;
	}

	ret = register_app_to_sessiond(sock);
	if (ret < 0) {
		ERR("Error registering to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	ust_unlock();

	for (;;) {
		ssize_t len;
		struct ustcomm_ust_msg lum;

		len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
		switch (len) {
		case 0:	/* orderly shutdown */
			DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
			ust_lock();
			/*
			 * Either sessiond has shutdown or refused us by closing the socket.
			 * In either case, we don't want to delay constructor execution,
			 * and we need to wait before retrying.
			 */
			prev_connect_failed = 1;
			/*
			 * If we cannot register to the sessiond daemon, don't
			 * delay constructor execution.
			 */
			ret = handle_register_done(sock_info);
			assert(!ret);
			ust_unlock();
			goto end;
		case sizeof(lum):
			DBG("message received\n");
			ret = handle_message(sock_info, sock, &lum);
			if (ret < 0) {
				ERR("Error handling message for %s socket", sock_info->name);
			}
			continue;
		case -1:
			DBG("Receive failed from lttng-sessiond with errno %d", errno);
			if (errno == ECONNRESET) {
				ERR("%s remote end closed connection\n", sock_info->name);
				goto end;
			}
			goto end;
		default:
			ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
			continue;
		}

	}
end:
	goto restart;	/* try to reconnect */
quit:
	return NULL;
}
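/*
 * Retry policy of the listener above: a failed connect or register
 * sets prev_connect_failed, so the next iteration first blocks on the
 * wait shm futex; a failure / wait / failure sequence additionally
 * sleeps 5 seconds before reconnecting, which keeps a killed or wedged
 * sessiond from turning this thread into a busy loop.
 */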

/*
 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
 */
static
int get_timeout(struct timespec *constructor_timeout)
{
	long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
	char *str_delay;
	int ret;

	str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
	if (str_delay) {
		constructor_delay_ms = strtol(str_delay, NULL, 10);
	}

	switch (constructor_delay_ms) {
	case -1:/* fall-through */
	case 0:
		return constructor_delay_ms;
	default:
		break;
	}

	/*
	 * If we are unable to find the current time, don't wait.
	 */
	ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
	if (ret) {
		/* Don't wait. */
		return 0;
	}
	constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
	constructor_timeout->tv_nsec +=
		(constructor_delay_ms % 1000UL) * 1000000UL;
	if (constructor_timeout->tv_nsec >= 1000000000UL) {
		constructor_timeout->tv_sec++;
		constructor_timeout->tv_nsec -= 1000000000UL;
	}
	return 1;
}
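/*
 * The timeout above is tunable through the LTTNG_UST_REGISTER_TIMEOUT
 * environment variable (in milliseconds). For example:
 *
 *	LTTNG_UST_REGISTER_TIMEOUT=3000 ./my-instrumented-app
 *
 * (application name chosen for illustration) waits at most 3 seconds
 * for sessiond registration, while -1 waits forever and 0 skips the
 * wait entirely.
 */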

/*
 * sessiond monitoring thread: monitor presence of global and per-user
 * sessiond by polling the application common named pipe.
 */
/* TODO */

void __attribute__((constructor)) lttng_ust_init(void)
{
	struct timespec constructor_timeout;
	int timeout_mode;
	int ret;

	if (uatomic_xchg(&initialized, 1) == 1)
		return;

	/*
	 * We want precise control over the order in which we construct
	 * our sub-libraries vs starting to receive commands from
	 * sessiond (otherwise leading to errors when trying to create
	 * sessions before the init functions are completed).
	 */
	init_usterr();
	init_tracepoint();
	ltt_ring_buffer_metadata_client_init();
	ltt_ring_buffer_client_overwrite_init();
	ltt_ring_buffer_client_discard_init();

	timeout_mode = get_timeout(&constructor_timeout);

	ret = sem_init(&constructor_wait, 0, 0);
	assert(!ret);

	ret = setup_local_apps();
	if (ret) {
		ERR("Error setting up local apps");
	}
	ret = pthread_create(&global_apps.ust_listener, NULL,
			ust_listener_thread, &global_apps);

	if (local_apps.allowed) {
		ret = pthread_create(&local_apps.ust_listener, NULL,
				ust_listener_thread, &local_apps);
	} else {
		handle_register_done(&local_apps);
	}

	switch (timeout_mode) {
	case 1:	/* timeout wait */
		do {
			ret = sem_timedwait(&constructor_wait,
					&constructor_timeout);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0 && errno == ETIMEDOUT) {
			ERR("Timed out waiting for ltt-sessiond");
		} else {
			assert(!ret);
		}
		break;
	case -1:/* wait forever */
		do {
			ret = sem_wait(&constructor_wait);
		} while (ret < 0 && errno == EINTR);
		assert(!ret);
		break;
	case 0:	/* don't wait */
		break;
	}
}
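/*
 * Note on the constructor above: because it runs before main(), an
 * already-running sessiond gets a chance to enable events before the
 * application hits its first tracepoint, and the timeout (or the
 * "don't wait" / "wait forever" modes) bounds how long application
 * startup may be delayed when no sessiond answers.
 */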

static
void lttng_ust_cleanup(int exiting)
{
	cleanup_sock_info(&global_apps, exiting);
	if (local_apps.allowed) {
		cleanup_sock_info(&local_apps, exiting);
	}
	/*
	 * All the teardown in this function affects data structures
	 * accessed under the UST lock by the listener threads. This
	 * lock, along with the lttng_ust_comm_should_quit flag, ensures
	 * that none of these threads are accessing this data at this
	 * point.
	 */
	lttng_ust_abi_exit();
	lttng_ust_events_exit();
	ltt_ring_buffer_client_discard_exit();
	ltt_ring_buffer_client_overwrite_exit();
	ltt_ring_buffer_metadata_client_exit();
	exit_tracepoint();
	if (!exiting) {
		/* Reinitialize values for fork */
		sem_count = 2;
		lttng_ust_comm_should_quit = 0;
		initialized = 0;
	}
}

void __attribute__((destructor)) lttng_ust_exit(void)
{
	int ret;

	/*
	 * Using pthread_cancel here because:
	 * A) we don't want to hang application teardown.
	 * B) the thread is not allocating any resource.
	 */

	/*
	 * Require the communication thread to quit. Synchronize with
	 * mutexes to ensure it is not in a mutex critical section when
	 * pthread_cancel is later called.
	 */
	ust_lock();
	lttng_ust_comm_should_quit = 1;
	ust_unlock();

	/* cancel threads */
	ret = pthread_cancel(global_apps.ust_listener);
	if (ret) {
		ERR("Error cancelling global ust listener thread");
	}
	if (local_apps.allowed) {
		ret = pthread_cancel(local_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling local ust listener thread");
		}
	}
	/*
	 * Do NOT join threads: use of sys_futex makes it impossible to
	 * join the threads without using async-cancel, but async-cancel
	 * is delivered by a signal, which could hit the target thread
	 * anywhere in its code path, including while the ust_lock() is
	 * held, causing a deadlock for the other thread. Let the OS
	 * clean up the threads if they are stalled in a syscall.
	 */
	lttng_ust_cleanup(1);
}

/*
 * We exclude the worker threads across fork and clone (except
 * CLONE_VM), because these system calls only keep the forking thread
 * running in the child. Therefore, we don't want to call fork or clone
 * in the middle of a tracepoint or UST tracing state modification.
 * Holding this mutex protects these structures across fork and clone.
 */
void ust_before_fork(sigset_t *save_sigset)
{
	/*
	 * Disable signals. This is to avoid that the child intervenes
	 * before it is properly set up for tracing. It is safer to
	 * disable all signals, because then we know we are not breaking
	 * anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int ret;

	/* Disable signals */
	sigfillset(&all_sigs);
	ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
	ust_lock();
	rcu_bp_before_fork();
}

static void ust_after_fork_common(sigset_t *restore_sigset)
{
	int ret;

	DBG("process %d", getpid());
	ust_unlock();
	/* Restore signals */
	ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
}

void ust_after_fork_parent(sigset_t *restore_sigset)
{
	DBG("process %d", getpid());
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
}

/*
 * After fork, in the child, we need to clean up all the leftover state,
 * except the worker thread which already magically disappeared thanks
 * to the weird Linux fork semantics. After tidying up, we call
 * lttng_ust_init() again to start over as a new PID.
 *
 * This is meant for forks that keep tracing in the child between the
 * fork and the following exec call (if there is any).
 */
void ust_after_fork_child(sigset_t *restore_sigset)
{
	DBG("process %d", getpid());
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();
	lttng_ust_cleanup(0);
	lttng_context_vtid_reset();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
	lttng_ust_init();
}
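/*
 * The three fork hooks above are not called from this file; they are
 * meant to be driven by a wrapper around fork()/clone() (the
 * liblttng-ust-fork preload library plays that role). A minimal sketch
 * of such a wrapper, assuming the real fork has been resolved with
 * dlsym(RTLD_NEXT, "fork") into real_fork (names are illustrative):
 *
 *	pid_t fork(void)
 *	{
 *		sigset_t sigset;
 *		pid_t pid;
 *
 *		ust_before_fork(&sigset);
 *		pid = real_fork();
 *		if (pid == 0)
 *			ust_after_fork_child(&sigset);
 *		else
 *			ust_after_fork_parent(&sigset);
 *		return pid;
 *	}
 */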