#ifndef _LTTNG_RING_BUFFER_BACKEND_H
#define _LTTNG_RING_BUFFER_BACKEND_H

/*
 * libringbuffer/backend.h
 *
 * Ring buffer backend (API).
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */

#include <unistd.h>

/* Internal helpers */
#include "backend_internal.h"
#include "frontend_internal.h"

/* Ring buffer backend API */

/* Ring buffer backend access (read/write) */

extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, void *dest, size_t len,
				   struct lttng_ust_shm_handle *handle);

extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset, void *dest, size_t len,
				     struct lttng_ust_shm_handle *handle);
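
/*
 * Example (illustrative sketch, not part of this header): copying an
 * event payload out of a buffer backend on the consumer side. The
 * "bufb", "handle" and "read_offset" variables are placeholders
 * assumed to come from the read-side iteration code.
 *
 *	char payload[256];
 *	size_t copied;
 *
 *	copied = lib_ring_buffer_read(bufb, read_offset, payload,
 *			sizeof(payload), handle);
 *	(a return value smaller than sizeof(payload) means fewer bytes
 *	were available at that offset)
 */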

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *
lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
			       size_t offset,
			       struct lttng_ust_shm_handle *handle);
extern void *
lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				    size_t offset,
				    struct lttng_ust_shm_handle *handle);
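
/*
 * Example (sketch): obtaining a direct pointer to the current
 * subbuffer header. "bufb" and "handle" are placeholders, and
 * "struct packet_header" is a hypothetical client-defined header type;
 * the write is safe because the header never sits on a page boundary.
 *
 *	struct packet_header *header;
 *
 *	header = lib_ring_buffer_offset_address(bufb, 0, handle);
 *	header->content_size = 0;
 */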

/**
 * lib_ring_buffer_write - write data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 *
 * This function copies "len" bytes of data from a source pointer to a buffer
 * backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_ring_buffer_write) if the copy crosses a page boundary.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	void *p;

	if (caa_unlikely(!len))
		return;
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;
	lib_ring_buffer_do_copy(config, p, src, len);
	ctx->buf_offset += len;
}
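
/*
 * Example (sketch): writing a fixed-size event payload from a probe.
 * "config" and "ctx" are placeholders assumed to have been set up by
 * the reserve path (e.g. lib_ring_buffer_reserve()) before this call.
 *
 *	struct { uint32_t id; uint64_t timestamp; } event = { 1, 42 };
 *
 *	lib_ring_buffer_write(config, ctx, &event, sizeof(event));
 *	(on return, ctx->buf_offset has advanced by sizeof(event))
 */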

/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src. Returns the number of bytes
 * copied. Does *not* terminate @dest with a NULL terminating character.
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
				 char *dest, const char *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		char c;

		/*
		 * Only read source character once, in case it is
		 * modified concurrently.
		 */
		c = CMM_LOAD_SHARED(src[count]);
		if (!c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}
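
/*
 * Worked example: with src = "abc" and len = 8, the loop copies the
 * three bytes 'a', 'b', 'c' into dest and returns 3. No '\0' is
 * written; padding and termination are left to the caller (see
 * lib_ring_buffer_strcpy() below).
 */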

/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#').
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    const char *src, size_t len, int pad)
{
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t count;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	void *p;

	if (caa_unlikely(!len))
		return;
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;

	count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
	offset += count;
	/* Padding */
	if (caa_unlikely(count < len - 1)) {
		size_t pad_len = len - 1 - count;

		p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
		if (caa_unlikely(!p))
			return;
		lib_ring_buffer_do_memset(p, pad, pad_len);
		offset += pad_len;
	}
	/* Final '\0' */
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;
	lib_ring_buffer_do_memset(p, '\0', 1);
	ctx->buf_offset += len;
}
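
/*
 * Example (sketch): recording a fixed-width string field. With
 * src = "abc", len = 8 and pad = '#', the buffer receives the eight
 * bytes 'a' 'b' 'c' '#' '#' '#' '#' '\0'. "config" and "ctx" are
 * placeholders from the reserve path, as for lib_ring_buffer_write().
 *
 *	lib_ring_buffer_strcpy(config, ctx, "abc", 8, '#');
 */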

/*
 * This accessor counts the number of unread records in a buffer.
 * It only provides a consistent value if no reads nor writes are performed
 * concurrently.
 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
	unsigned long records_unread = 0, sb_bindex;
	unsigned int i;
	struct channel *chan;

	chan = shmp(handle, bufb->chan);
	if (!chan)
		return 0;
	for (i = 0; i < chan->backend.num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

		wsb = shmp_index(handle, bufb->buf_wsb, i);
		if (!wsb)
			return 0;
		sb_bindex = subbuffer_id_get_index(config, wsb->id);
		rpages = shmp_index(handle, bufb->array, sb_bindex);
		if (!rpages)
			return 0;
		backend_pages = shmp(handle, rpages->shmp);
		if (!backend_pages)
			return 0;
		records_unread += v_read(config, &backend_pages->records_unread);
	}
	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

		sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
		rpages = shmp_index(handle, bufb->array, sb_bindex);
		if (!rpages)
			return 0;
		backend_pages = shmp(handle, rpages->shmp);
		if (!backend_pages)
			return 0;
		records_unread += v_read(config, &backend_pages->records_unread);
	}
	return records_unread;
}
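
/*
 * Example (sketch): polling the unread-record count from a consumer
 * thread. "config", "buf" and "handle" are placeholders for the usual
 * client configuration, ring buffer and shm handle.
 *
 *	unsigned long unread;
 *
 *	unread = lib_ring_buffer_get_records_unread(config, buf, handle);
 *	(the value is only consistent when no reads nor writes run
 *	concurrently)
 */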

#endif /* _LTTNG_RING_BUFFER_BACKEND_H */