/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; only version 2
 * of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

#include <lttng-kernel-ctl.h>
#include <lttng-sessiond-comm.h>
#include <lttng/lttng-kconsumerd.h>
#include <lttngerr.h>

static struct lttng_kconsumerd_global_data {
	/*
	 * kconsumerd_data.lock protects kconsumerd_data.fd_list,
	 * kconsumerd_data.fds_count, and kconsumerd_data.need_update. It ensures
	 * that the count matches the number of items in the fd_list. It also
	 * ensures that a list update *always* triggers an fd_array update
	 * (therefore, a list update and the kconsumerd_data.need_update flag
	 * update must be atomic, and so must the flag read, fd array rebuild
	 * and flag clear).
	 */
	pthread_mutex_t lock;
	/*
	 * Number of elements in the list below. Protected by kconsumerd_data.lock.
	 */
	unsigned int fds_count;
	/*
	 * List of FDs. Protected by kconsumerd_data.lock.
	 */
	struct lttng_kconsumerd_fd_list fd_list;
	/*
	 * Flag specifying if the local array of FDs needs update in the poll
	 * function. Protected by kconsumerd_data.lock.
	 */
	unsigned int need_update;
} kconsumerd_data = {
	.fd_list.head = CDS_LIST_HEAD_INIT(kconsumerd_data.fd_list.head),
};
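
/*
 * Illustrative sketch of the two critical sections the comment above
 * requires. Writers mutate the list and raise the flag in one locked section;
 * the poll thread reads the flag, rebuilds its local fd array and clears the
 * flag in another:
 *
 *   pthread_mutex_lock(&kconsumerd_data.lock);
 *   cds_list_add(&new_fd->list, &kconsumerd_data.fd_list.head);
 *   kconsumerd_data.fds_count++;
 *   kconsumerd_data.need_update = 1;
 *   pthread_mutex_unlock(&kconsumerd_data.lock);
 *
 *   pthread_mutex_lock(&kconsumerd_data.lock);
 *   if (kconsumerd_data.need_update) {
 *           // rebuild the pollfd array, see kconsumerd_update_poll_array()
 *           kconsumerd_data.need_update = 0;
 *   }
 *   pthread_mutex_unlock(&kconsumerd_data.lock);
 */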

/* Timeout parameter, to control the polling thread grace period. */
static int kconsumerd_poll_timeout = -1;

/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by kconsumerd_thread_receive_fds when it notices that all fds have
 * hung up. Also updated by the signal handler (kconsumerd_should_exit()).
 * Read by the polling threads.
 */
static volatile int kconsumerd_quit = 0;

/*
 * Find a session fd in the global list. The kconsumerd_data.lock must be
 * locked during this call.
 *
 * Return 1 if found else 0.
 */
static int kconsumerd_find_session_fd(int fd)
{
	struct lttng_kconsumerd_fd *iter;

	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == fd) {
			DBG("Duplicate session fd %d", fd);
			return 1;
		}
	}

	return 0;
}

/*
 * Remove a fd from the global list protected by a mutex.
 */
static void kconsumerd_del_fd(struct lttng_kconsumerd_fd *lcf)
{
	int ret;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_del(&lcf->list);
	if (kconsumerd_data.fds_count > 0) {
		kconsumerd_data.fds_count--;
	}
	if (lcf->mmap_base != NULL) {
		ret = munmap(lcf->mmap_base, lcf->mmap_len);
		if (ret != 0) {
			perror("munmap");
		}
	}
	if (lcf->out_fd != 0) {
		close(lcf->out_fd);
	}
	close(lcf->consumerd_fd);
	free(lcf);
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * Allocate a struct lttng_kconsumerd_fd from the information received on the
 * receiving socket.
 */
struct lttng_kconsumerd_fd *kconsumerd_allocate_fd(
		struct lttcomm_kconsumerd_msg *buf,
		int consumerd_fd)
{
	struct lttng_kconsumerd_fd *tmp_fd;

	tmp_fd = malloc(sizeof(struct lttng_kconsumerd_fd));
	if (tmp_fd == NULL) {
		perror("malloc struct lttng_kconsumerd_fd");
		return NULL;
	}

	tmp_fd->sessiond_fd = buf->fd;
	tmp_fd->consumerd_fd = consumerd_fd;
	tmp_fd->state = buf->state;
	tmp_fd->max_sb_size = buf->max_sb_size;
	tmp_fd->out_fd = 0;
	tmp_fd->out_fd_offset = 0;
	tmp_fd->mmap_len = 0;
	tmp_fd->mmap_base = NULL;
	tmp_fd->output = buf->output;
	strncpy(tmp_fd->path_name, buf->path_name, PATH_MAX);
	tmp_fd->path_name[PATH_MAX - 1] = '\0';

	DBG("Allocated %s (sessiond_fd %d, consumerd_fd %d, out_fd %d)",
			tmp_fd->path_name, tmp_fd->sessiond_fd,
			tmp_fd->consumerd_fd, tmp_fd->out_fd);

	return tmp_fd;
}

/*
 * Add a fd to the global list protected by a mutex.
 */
static int kconsumerd_add_fd(struct lttng_kconsumerd_fd *tmp_fd)
{
	int ret;

	pthread_mutex_lock(&kconsumerd_data.lock);
	/* Check if the fd already exists in the list */
	ret = kconsumerd_find_session_fd(tmp_fd->sessiond_fd);
	if (ret == 1) {
		goto end;
	}

	cds_list_add(&tmp_fd->list, &kconsumerd_data.fd_list.head);
	kconsumerd_data.fds_count++;
	kconsumerd_data.need_update = 1;

end:
	pthread_mutex_unlock(&kconsumerd_data.lock);
	return ret;
}

/*
 * Update a fd according to what we just received.
 */
static void kconsumerd_change_fd_state(int sessiond_fd,
		enum lttng_kconsumerd_fd_state state)
{
	struct lttng_kconsumerd_fd *iter;

	pthread_mutex_lock(&kconsumerd_data.lock);
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->sessiond_fd == sessiond_fd) {
			iter->state = state;
			break;
		}
	}
	kconsumerd_data.need_update = 1;
	pthread_mutex_unlock(&kconsumerd_data.lock);
}

/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with kconsumerd_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int kconsumerd_update_poll_array(
		struct lttng_kconsumerd_local_data *ctx, struct pollfd **pollfd,
		struct lttng_kconsumerd_fd **local_kconsumerd_fd)
{
	struct lttng_kconsumerd_fd *iter;
	int i = 0;

	DBG("Updating poll fd array");
	cds_list_for_each_entry(iter, &kconsumerd_data.fd_list.head, list) {
		if (iter->state == ACTIVE_FD) {
			DBG("Active FD %d", iter->consumerd_fd);
			(*pollfd)[i].fd = iter->consumerd_fd;
			(*pollfd)[i].events = POLLIN | POLLPRI;
			local_kconsumerd_fd[i] = iter;
			i++;
		}
	}

	/*
	 * Insert the kconsumerd_poll_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FDs.
	 */
	(*pollfd)[i].fd = ctx->kconsumerd_poll_pipe[0];
	(*pollfd)[i].events = POLLIN;
	return i;
}

/*
 * Receives an array of file descriptors and the associated structures
 * describing each fd (path name).
 *
 * Returns the size of received data
 */
static int kconsumerd_consumerd_recv_fd(
		struct lttng_kconsumerd_local_data *ctx, int sfd,
		struct pollfd *kconsumerd_sockpoll, int size,
		enum lttng_kconsumerd_command cmd_type)
{
	struct iovec iov[1];
	int ret = 0, i, tmp2;
	struct cmsghdr *cmsg;
	int nb_fd;
	char recv_fd[CMSG_SPACE(sizeof(int))];
	struct lttcomm_kconsumerd_msg lkm;
	struct lttng_kconsumerd_fd *new_fd;

	/* The number of fds we are about to receive */
	nb_fd = size / sizeof(struct lttcomm_kconsumerd_msg);

	/*
	 * nb_fd is the number of fds we receive. One fd per recvmsg.
	 */
	for (i = 0; i < nb_fd; i++) {
		struct msghdr msg = { 0 };

		/* Prepare to receive the structures */
		iov[0].iov_base = &lkm;
		iov[0].iov_len = sizeof(lkm);
		msg.msg_iov = iov;
		msg.msg_iovlen = 1;

		msg.msg_control = recv_fd;
		msg.msg_controllen = sizeof(recv_fd);

		DBG("Waiting to receive fd");
		if (lttng_kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
			goto end;
		}

		if ((ret = recvmsg(sfd, &msg, 0)) < 0) {
			perror("recvmsg");
			continue;
		}

		if (ret != (size / nb_fd)) {
			ERR("Received only %d, expected %d", ret, size);
			lttng_kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		cmsg = CMSG_FIRSTHDR(&msg);
		if (!cmsg) {
			ERR("Invalid control message header");
			ret = -1;
			lttng_kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			goto end;
		}

		/* If we received fds */
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
			switch (cmd_type) {
			case ADD_STREAM:
				DBG("kconsumerd_add_fd %s (%d)", lkm.path_name,
						((int *) CMSG_DATA(cmsg))[0]);
				new_fd = kconsumerd_allocate_fd(&lkm, ((int *) CMSG_DATA(cmsg))[0]);
				if (new_fd == NULL) {
					lttng_kconsumerd_send_error(ctx, KCONSUMERD_OUTFD_ERROR);
					goto end;
				}

				if (ctx->on_recv_fd != NULL) {
					ret = ctx->on_recv_fd(new_fd);
					if (ret == 0) {
						kconsumerd_add_fd(new_fd);
					} else if (ret < 0) {
						goto end;
					}
				} else {
					kconsumerd_add_fd(new_fd);
				}
				break;
			case UPDATE_STREAM:
				if (ctx->on_update_fd != NULL) {
					ret = ctx->on_update_fd(lkm.fd, lkm.state);
					if (ret == 0) {
						kconsumerd_change_fd_state(lkm.fd, lkm.state);
					} else if (ret < 0) {
						goto end;
					}
				} else {
					kconsumerd_change_fd_state(lkm.fd, lkm.state);
				}
				break;
			default:
				break;
			}

			/* Signal the poll thread */
			tmp2 = write(ctx->kconsumerd_poll_pipe[1], "4", 1);
			if (tmp2 < 0) {
				perror("write kconsumerd poll");
			}
		} else {
			ERR("Didn't receive any fd");
			lttng_kconsumerd_send_error(ctx, KCONSUMERD_ERROR_RECV_FD);
			ret = -1;
			goto end;
		}
	}

end:
	return ret;
}
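
/*
 * For reference, the sending side of the SCM_RIGHTS exchange handled above
 * looks roughly like the sketch below (illustrative only; the session
 * daemon's actual code and the exact lttcomm_kconsumerd_msg contents may
 * differ). One struct lttcomm_kconsumerd_msg travels in the iovec while the
 * file descriptor itself rides in the ancillary data:
 *
 *   char cmsg_buf[CMSG_SPACE(sizeof(int))];
 *   struct lttcomm_kconsumerd_msg lkm;
 *   struct iovec iov = { .iov_base = &lkm, .iov_len = sizeof(lkm) };
 *   struct msghdr msg = { 0 };
 *   struct cmsghdr *cmsg;
 *
 *   msg.msg_iov = &iov;
 *   msg.msg_iovlen = 1;
 *   msg.msg_control = cmsg_buf;
 *   msg.msg_controllen = sizeof(cmsg_buf);
 *   cmsg = CMSG_FIRSTHDR(&msg);
 *   cmsg->cmsg_level = SOL_SOCKET;
 *   cmsg->cmsg_type = SCM_RIGHTS;
 *   cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *   memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *   sendmsg(sock, &msg, 0);
 */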

/*
 * Set the error socket.
 */
void lttng_kconsumerd_set_error_sock(
		struct lttng_kconsumerd_local_data *ctx, int sock)
{
	ctx->kconsumerd_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_kconsumerd_set_command_sock_path(
		struct lttng_kconsumerd_local_data *ctx, char *sock)
{
	ctx->kconsumerd_command_sock_path = sock;
}

static void lttng_kconsumerd_sync_trace_file(
		struct lttng_kconsumerd_fd *kconsumerd_fd, off_t orig_offset)
{
	int outfd = kconsumerd_fd->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset >= kconsumerd_fd->max_sb_size) {
		sync_file_range(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size,
				SYNC_FILE_RANGE_WAIT_BEFORE
				| SYNC_FILE_RANGE_WRITE
				| SYNC_FILE_RANGE_WAIT_AFTER);
		/*
		 * Give hints to the kernel about how we access the file:
		 * POSIX_FADV_DONTNEED : we won't re-access data in a near future
		 * after we write it.
		 *
		 * We need to call fadvise again after the file grows because the
		 * kernel does not seem to apply fadvise to non-existing parts of
		 * the file.
		 *
		 * Call fadvise _after_ having waited for the page writeback to
		 * complete because the dirty page writeback semantic is not well
		 * defined. So it can be expected to lead to lower throughput in
		 * streaming.
		 */
		posix_fadvise(outfd, orig_offset - kconsumerd_fd->max_sb_size,
				kconsumerd_fd->max_sb_size, POSIX_FADV_DONTNEED);
	}
}

/*
 * Mmap the ring buffer, read it and write the data to the tracefile.
 *
 * Returns the number of bytes written
 */
int lttng_kconsumerd_on_read_subbuffer_mmap(
		struct lttng_kconsumerd_local_data *ctx,
		struct lttng_kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	unsigned long mmap_offset;
	int ret = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	/* Get the offset inside the fd to mmap */
	ret = kernctl_get_mmap_read_offset(fd, &mmap_offset);
	if (ret != 0) {
		perror("kernctl_get_mmap_read_offset");
		goto end;
	}

	while (len > 0) {
		ret = write(outfd, kconsumerd_fd->mmap_base + mmap_offset, len);
		if (ret < 0) {
			perror("Error in file write");
			goto end;
		} else if (ret >= len) {
			len = 0;
		} else {
			/* Partial write: advance past the bytes already written. */
			len -= ret;
			mmap_offset += ret;
		}
		/* This won't block, but will start writeout asynchronously */
		sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
				SYNC_FILE_RANGE_WRITE);
		kconsumerd_fd->out_fd_offset += ret;
	}

	lttng_kconsumerd_sync_trace_file(kconsumerd_fd, orig_offset);

end:
	return ret;
}

/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * Returns the number of bytes spliced.
 */
int lttng_kconsumerd_on_read_subbuffer_splice(
		struct lttng_kconsumerd_local_data *ctx,
		struct lttng_kconsumerd_fd *kconsumerd_fd, unsigned long len)
{
	long ret = 0;
	loff_t offset = 0;
	off_t orig_offset = kconsumerd_fd->out_fd_offset;
	int fd = kconsumerd_fd->consumerd_fd;
	int outfd = kconsumerd_fd->out_fd;

	DBG("splice chan to pipe offset %lu (fd : %d)",
			(unsigned long)offset, fd);
	ret = splice(fd, &offset, ctx->kconsumerd_thread_pipe[1], NULL, len,
			SPLICE_F_MOVE | SPLICE_F_MORE);
	DBG("splice chan to pipe ret %ld", ret);
	if (ret < 0) {
		perror("Error in relay splice");
		goto splice_error;
	}

	ret = splice(ctx->kconsumerd_thread_pipe[0], NULL, outfd, NULL, ret,
			SPLICE_F_MOVE | SPLICE_F_MORE);
	DBG("splice pipe to file %ld", ret);
	if (ret < 0) {
		perror("Error in file splice");
		goto splice_error;
	}

	/* This won't block, but will start writeout asynchronously */
	sync_file_range(outfd, kconsumerd_fd->out_fd_offset, ret,
			SYNC_FILE_RANGE_WRITE);
	kconsumerd_fd->out_fd_offset += ret;

	lttng_kconsumerd_sync_trace_file(kconsumerd_fd, orig_offset);

	goto end;

splice_error:
	/* send the appropriate error description to sessiond */
	switch (errno) {
	case EBADF:
		lttng_kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_EBADF);
		break;
	case EINVAL:
		lttng_kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_kconsumerd_send_error(ctx, KCONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	return ret;
}

/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumerd_take_snapshot(struct lttng_kconsumerd_local_data *ctx,
		struct lttng_kconsumerd_fd *kconsumerd_fd)
{
	int ret;
	int infd = kconsumerd_fd->consumerd_fd;

	ret = kernctl_snapshot(infd);
	if (ret != 0) {
		perror("Getting sub-buffer snapshot.");
	}

	return ret;
}

/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_kconsumerd_get_produced_snapshot(
		struct lttng_kconsumerd_local_data *ctx,
		struct lttng_kconsumerd_fd *kconsumerd_fd,
		unsigned long *pos)
{
	int ret;
	int infd = kconsumerd_fd->consumerd_fd;

	ret = kernctl_snapshot_get_produced(infd, pos);
	if (ret != 0) {
		perror("kernctl_snapshot_get_produced");
	}

	return ret;
}

/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error
 * (the caller should exit), 0 if data is available on the command socket.
 */
int lttng_kconsumerd_poll_socket(struct pollfd *kconsumerd_sockpoll)
{
	int num_rdy;

	num_rdy = poll(kconsumerd_sockpoll, 2, -1);
	if (num_rdy == -1) {
		perror("Poll error");
		goto exit;
	}
	if (kconsumerd_sockpoll[0].revents == POLLIN) {
		DBG("kconsumerd_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}

/*
 * This thread polls the fds in the ltt_fd_list to consume the data and write
 * it to tracefile if necessary.
 */
void *lttng_kconsumerd_thread_poll_fds(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i;
	struct pollfd *pollfd = NULL;
	/* local view of the fds */
	struct lttng_kconsumerd_fd **local_kconsumerd_fd = NULL;
	/* local view of kconsumerd_data.fds_count */
	int nb_fd = 0;
	char tmp;
	int tmp2;
	struct lttng_kconsumerd_local_data *ctx = data;

	local_kconsumerd_fd = malloc(sizeof(struct lttng_kconsumerd_fd));

	while (1) {
		high_prio = 0;
		num_hup = 0;

		/*
		 * the ltt_fd_list has been updated, we need to update our
		 * local array as well
		 */
		pthread_mutex_lock(&kconsumerd_data.lock);
		if (kconsumerd_data.need_update) {
			if (pollfd != NULL) {
				free(pollfd);
				pollfd = NULL;
			}
			if (local_kconsumerd_fd != NULL) {
				free(local_kconsumerd_fd);
				local_kconsumerd_fd = NULL;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			pollfd = malloc((kconsumerd_data.fds_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				perror("pollfd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the kconsumerd_poll_pipe */
			local_kconsumerd_fd = malloc((kconsumerd_data.fds_count + 1) *
					sizeof(struct lttng_kconsumerd_fd));
			if (local_kconsumerd_fd == NULL) {
				perror("local_kconsumerd_fd malloc");
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}

			ret = kconsumerd_update_poll_array(ctx, &pollfd, local_kconsumerd_fd);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_kconsumerd_send_error(ctx, KCONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&kconsumerd_data.lock);
				goto end;
			}
			nb_fd = ret;
			kconsumerd_data.need_update = 0;
		}
		pthread_mutex_unlock(&kconsumerd_data.lock);

		/* poll on the array of fds */
		DBG("polling on %d fd", nb_fd + 1);
		num_rdy = poll(pollfd, nb_fd + 1, kconsumerd_poll_timeout);
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			perror("Poll error");
			lttng_kconsumerd_send_error(ctx, KCONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/* No FDs and kconsumerd_quit, kconsumerd_cleanup the thread */
		if (nb_fd == 0 && kconsumerd_quit == 1) {
			goto end;
		}

		/*
		 * If the kconsumerd_poll_pipe triggered poll go
		 * directly to the beginning of the loop to update the
		 * array. We want to prioritize array update over
		 * low-priority reads.
		 */
		if (pollfd[nb_fd].revents == POLLIN) {
			DBG("kconsumerd_poll_pipe wake up");
			tmp2 = read(ctx->kconsumerd_poll_pipe[0], &tmp, 1);
			if (tmp2 < 0) {
				perror("read kconsumerd poll");
			}
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			switch (pollfd[i].revents) {
			case POLLERR:
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLHUP:
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLNVAL:
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				kconsumerd_del_fd(local_kconsumerd_fd[i]);
				num_hup++;
				break;
			case POLLPRI:
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				ret = ctx->on_buffer_ready(local_kconsumerd_fd[i]);
				/* it's ok to have an unavailable sub-buffer */
				if (ret == EAGAIN) {
					ret = 0;
				}
				break;
			}
		}

		/* If every buffer FD has hung up, we end the read loop here */
		if (nb_fd > 0 && num_hup == nb_fd) {
			DBG("every buffer FD has hung up\n");
			if (kconsumerd_quit == 1) {
				goto end;
			}
		}

		/* Take care of low priority channels. */
		if (high_prio == 0) {
			for (i = 0; i < nb_fd; i++) {
				if (pollfd[i].revents == POLLIN) {
					DBG("Normal read on fd %d", pollfd[i].fd);
					ret = ctx->on_buffer_ready(local_kconsumerd_fd[i]);
					/* it's ok to have an unavailable sub-buffer */
					if (ret == EAGAIN) {
						ret = 0;
					}
				}
			}
		}
	}

end:
	DBG("polling thread exiting");
	if (pollfd != NULL) {
		free(pollfd);
		pollfd = NULL;
	}
	if (local_kconsumerd_fd != NULL) {
		free(local_kconsumerd_fd);
		local_kconsumerd_fd = NULL;
	}
	return NULL;
}

/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument; this function is called when data is
 * available on a buffer. It is responsible for doing the
 * kernctl_get_next_subbuf, reading the data with mmap or splice depending on
 * the buffer configuration, and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_kconsumerd_local_data *lttng_kconsumerd_create(
		int (*buffer_ready)(struct lttng_kconsumerd_fd *kconsumerd_fd),
		int (*recv_fd)(struct lttng_kconsumerd_fd *kconsumerd_fd),
		int (*update_fd)(int sessiond_fd, uint32_t state))
{
	int ret, i;
	struct lttng_kconsumerd_local_data *ctx;

	ctx = malloc(sizeof(struct lttng_kconsumerd_local_data));
	if (ctx == NULL) {
		perror("allocating context");
		goto error;
	}

	ctx->kconsumerd_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_fd = recv_fd;
	ctx->on_update_fd = update_fd;

	ret = pipe(ctx->kconsumerd_poll_pipe);
	if (ret < 0) {
		perror("Error creating poll pipe");
		goto error_poll_pipe;
	}

	ret = pipe(ctx->kconsumerd_should_quit);
	if (ret < 0) {
		perror("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->kconsumerd_thread_pipe);
	if (ret < 0) {
		perror("Error creating thread pipe");
		goto error_thread_pipe;
	}

	return ctx;

error_thread_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->kconsumerd_should_quit[i]);
		assert(!err);
	}
error_quit_pipe:
	for (i = 0; i < 2; i++) {
		int err;

		err = close(ctx->kconsumerd_poll_pipe[i]);
		assert(!err);
	}
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
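
/*
 * Minimal usage sketch for lttng_kconsumerd_create() (illustrative only; the
 * callback and variable names are hypothetical, not part of this library).
 * The buffer_ready callback must follow the contract described above: get the
 * next sub-buffer, consume it with the mmap or splice helper, then put it
 * back. Passing NULL for recv_fd and update_fd is handled by
 * kconsumerd_consumerd_recv_fd().
 *
 *   static int my_buffer_ready(struct lttng_kconsumerd_fd *kfd)
 *   {
 *           // kernctl_get_next_subbuf(), then
 *           // lttng_kconsumerd_on_read_subbuffer_mmap() or
 *           // lttng_kconsumerd_on_read_subbuffer_splice(), then
 *           // kernctl_put_next_subbuf(); return 0 on success.
 *           return 0;
 *   }
 *
 *   struct lttng_kconsumerd_local_data *ctx;
 *
 *   ctx = lttng_kconsumerd_create(my_buffer_ready, NULL, NULL);
 *   if (ctx == NULL) {
 *           return -1;
 *   }
 *   lttng_kconsumerd_set_error_sock(ctx, error_sock);
 *   lttng_kconsumerd_set_command_sock_path(ctx, command_sock_path);
 */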

/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_kconsumerd_destroy(struct lttng_kconsumerd_local_data *ctx)
{
	close(ctx->kconsumerd_error_socket);
	close(ctx->kconsumerd_thread_pipe[0]);
	close(ctx->kconsumerd_thread_pipe[1]);
	close(ctx->kconsumerd_poll_pipe[0]);
	close(ctx->kconsumerd_poll_pipe[1]);
	close(ctx->kconsumerd_should_quit[0]);
	close(ctx->kconsumerd_should_quit[1]);
	unlink(ctx->kconsumerd_command_sock_path);
	free(ctx);
}

/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *lttng_kconsumerd_thread_receive_fds(void *data)
{
	int sock, client_socket, ret;
	struct lttcomm_kconsumerd_header tmp;
	/*
	 * Structure to poll for incoming data on the communication socket;
	 * avoids blocking on the socket.
	 */
	struct pollfd kconsumerd_sockpoll[2];
	struct lttng_kconsumerd_local_data *ctx = data;

	DBG("Creating command socket %s", ctx->kconsumerd_command_sock_path);
	unlink(ctx->kconsumerd_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->kconsumerd_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to ltt-sessiond");
	ret = lttng_kconsumerd_send_error(ctx, KCONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to ltt-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	kconsumerd_sockpoll[0].fd = ctx->kconsumerd_should_quit[0];
	kconsumerd_sockpoll[0].events = POLLIN | POLLPRI;
	kconsumerd_sockpoll[1].fd = client_socket;
	kconsumerd_sockpoll[1].events = POLLIN | POLLPRI;

	if (lttng_kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock <= 0) {
		goto end;
	}

	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		perror("fcntl O_NONBLOCK");
		goto end;
	}

	/* update the polling structure to poll on the established socket */
	kconsumerd_sockpoll[1].fd = sock;
	kconsumerd_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_kconsumerd_poll_socket(kconsumerd_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming fds on sock");

		/* We first get the number of fd we are about to receive */
		ret = lttcomm_recv_unix_sock(sock, &tmp,
				sizeof(struct lttcomm_kconsumerd_header));
		if (ret <= 0) {
			ERR("Communication interrupted on command socket");
			goto end;
		}
		if (tmp.cmd_type == STOP) {
			DBG("Received STOP command");
			goto end;
		}
		if (kconsumerd_quit) {
			DBG("kconsumerd_thread_receive_fds received quit from signal");
			goto end;
		}

		/* we received a command to add or update fds */
		ret = kconsumerd_consumerd_recv_fd(ctx, sock, kconsumerd_sockpoll,
				tmp.payload_size, tmp.cmd_type);
		if (ret <= 0) {
			ERR("Receiving the FD, exiting");
			goto end;
		}
		DBG("received fds on sock");
	}

end:
	DBG("kconsumerd_thread_receive_fds exiting");

	/*
	 * when all fds have hung up, the polling thread
	 * can exit cleanly
	 */
	kconsumerd_quit = 1;

	/*
	 * 2s of grace period, if no polling events occur during
	 * this period, the polling thread will exit even if there
	 * are still open FDs (should not happen, but safety mechanism).
	 */
	kconsumerd_poll_timeout = LTTNG_KCONSUMERD_POLL_GRACE_PERIOD;

	/* wake up the polling thread */
	ret = write(ctx->kconsumerd_poll_pipe[1], "4", 1);
	if (ret < 0) {
		perror("poll pipe write");
	}
	return NULL;
}
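
/*
 * Typical way to run the two threads above (illustrative sketch; thread and
 * variable names are hypothetical). The same context is handed to both the
 * fd-receiving thread and the polling thread:
 *
 *   pthread_t recv_thread, poll_thread;
 *
 *   pthread_create(&recv_thread, NULL,
 *           lttng_kconsumerd_thread_receive_fds, (void *) ctx);
 *   pthread_create(&poll_thread, NULL,
 *           lttng_kconsumerd_thread_poll_fds, (void *) ctx);
 *
 *   pthread_join(recv_thread, NULL);
 *   pthread_join(poll_thread, NULL);
 *   lttng_kconsumerd_cleanup();
 *   lttng_kconsumerd_destroy(ctx);
 */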

/*
 * Close all the tracefiles and stream fds, should be called when all
 * instances are destroyed.
 */
void lttng_kconsumerd_cleanup(void)
{
	struct lttng_kconsumerd_fd *iter, *tmp;

	/*
	 * close all outfd. Called when there are no more threads
	 * running (after joining on the threads), no need to protect
	 * list iteration with mutex.
	 */
	cds_list_for_each_entry_safe(iter, tmp,
			&kconsumerd_data.fd_list.head, list) {
		kconsumerd_del_fd(iter);
	}
}

/*
 * Called from signal handler.
 */
void lttng_kconsumerd_should_exit(struct lttng_kconsumerd_local_data *ctx)
{
	int ret;

	kconsumerd_quit = 1;
	ret = write(ctx->kconsumerd_should_quit[1], "4", 1);
	if (ret < 0) {
		perror("write kconsumerd quit");
	}
}
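
/*
 * Illustrative sketch of the intended call site: a signal handler that asks
 * both threads to exit (handler and variable names are hypothetical). Writing
 * to the should_quit pipe is what wakes up
 * lttng_kconsumerd_thread_receive_fds through lttng_kconsumerd_poll_socket().
 *
 *   static struct lttng_kconsumerd_local_data *the_ctx;
 *
 *   static void sighandler(int sig)
 *   {
 *           lttng_kconsumerd_should_exit(the_ctx);
 *   }
 *
 *   struct sigaction sa = { .sa_handler = sighandler };
 *   sigemptyset(&sa.sa_mask);
 *   sigaction(SIGINT, &sa, NULL);
 *   sigaction(SIGTERM, &sa, NULL);
 */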

/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0, it is not a fatal error
 */
int lttng_kconsumerd_send_error(
		struct lttng_kconsumerd_local_data *ctx, int cmd)
{
	if (ctx->kconsumerd_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->kconsumerd_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}