#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
#define _LINUX_RING_BUFFER_FRONTEND_API_H

/*
 * linux/ringbuffer/frontend_api.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (buffer write API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include "frontend.h"
#include <urcu/compiler.h>

/**
 * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
 *
 * Grabs RCU read-side lock and keeps a ring buffer nesting count as
 * supplementary safety net to ensure tracer client code will never
 * trigger an endless recursion. Returns the processor ID on success,
 * -EPERM on failure (nesting count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count. This is required to ensure
 * that probe side-effects which can cause recursion (e.g. unforeseen traps,
 * divisions by 0, ...) are triggered within the incremented nesting count
 * section.
 */
static inline
int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
	int cpu, nesting;

	rcu_read_lock();
	cpu = lttng_ust_get_cpu();
	nesting = ++lib_ring_buffer_nesting;	/* TLS */
	cmm_barrier();

	if (caa_unlikely(nesting > 4)) {
		WARN_ON_ONCE(1);
		lib_ring_buffer_nesting--;	/* TLS */
		rcu_read_unlock();
		return -EPERM;
	} else
		return cpu;
}

/**
 * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
 */
static inline
void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
	cmm_barrier();
	lib_ring_buffer_nesting--;	/* TLS */
	rcu_read_unlock();
}
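
/*
 * Illustrative sketch (not part of the original header): a record write path
 * is expected to bracket reserve/commit with lib_ring_buffer_get_cpu() and
 * lib_ring_buffer_put_cpu(), so the RCU read-side lock and the nesting count
 * cover the whole record write. "config" is assumed to come from the client's
 * ring buffer configuration.
 *
 *	int cpu;
 *
 *	cpu = lib_ring_buffer_get_cpu(config);
 *	if (cpu < 0)
 *		return;		// nesting too deep: drop this record
 *	// ... reserve space, write the record, commit (see below) ...
 *	lib_ring_buffer_put_cpu(config);
 */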

/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * returns 0 if reserve ok, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_ctx *ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx->tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and commit seq value to compare it to
	 * the commit counter.
	 */
	//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	/*
	 * Because we don't use any timer in the application, we
	 * currently cannot guarantee that we have frequent
	 * events that let us detect 27-bit overflows.
	 * Therefore, for now, we force event headers
	 * to contain 64-bit timestamps.
	 */
	ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
#if 0
	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
#endif

	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->slot_size = record_header_size(config, chan, *o_begin,
					    before_hdr_pad, ctx);
	ctx->slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->slot_size,
				      ctx->largest_align) + ctx->data_size;
	if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
			 > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->slot_size;

	if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}

/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input and output) Must be already initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
 *
 * Return :
 *  0 on success.
 * -EAGAIN if channel is disabled.
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	struct lttng_ust_lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (uatomic_read(&chan->record_disabled))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
	else
		buf = shmp(handle, chan->backend.buf[0].shmp);
	if (uatomic_read(&buf->record_disabled))
		return -EAGAIN;
	ctx->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
						     &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
			 != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, ctx->buf, ctx->tsc);

	/*
	 * Push the reader if necessary.
	 */
	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
				    subbuf_index(o_end - 1, chan), handle);

	ctx->pre_offset = o_begin;
	ctx->buf_offset = o_begin + before_hdr_pad;
	return 0;

slow_path:
	return lib_ring_buffer_reserve_slow(ctx);
}
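
/*
 * Illustrative sketch (not part of the original header): typical reservation
 * sequence, assuming "ctx" was initialized by the client for this channel
 * (payload size, alignment, cpu, shm handle) before the call. The payload
 * variable and the backend copy helper named below are assumptions used only
 * for illustration.
 *
 *	int ret;
 *
 *	ret = lib_ring_buffer_reserve(config, &ctx);
 *	if (ret)
 *		goto put;	// -EAGAIN, -ENOSPC, -ENOBUFS or -EIO (see above)
 *	// write the record header, then the payload at ctx.buf_offset,
 *	// e.g. lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(config, &ctx);
 * put:
 *	lib_ring_buffer_put_cpu(config);
 */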

/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: ring buffer.
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * needs to be executed locally for per-CPU buffers, this function must be
 * called from the CPU which owns the buffer for an ACTIVE flush, with
 * preemption disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
			    struct lttng_ust_shm_handle *handle)
{
	lib_ring_buffer_switch_slow(buf, mode, handle);
}
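
/*
 * Illustrative sketch (not part of the original header): forcing the current
 * partially-filled sub-buffer out to the reader, e.g. at tracing teardown.
 * As documented above, an ACTIVE flush of a per-CPU buffer
 * (RING_BUFFER_SYNC_PER_CPU) must run on the CPU which owns the buffer.
 *
 *	lib_ring_buffer_switch(config, buf, SWITCH_FLUSH, handle);
 */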

/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */

/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
			    const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	unsigned long offset_end = ctx->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;

	/*
	 * Must count the record before incrementing the commit count.
	 */
	subbuffer_count_record(config, &buf->backend, endidx, handle);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	cmm_smp_wmb();

	v_add(config, ctx->slot_size,
	      &shmp_index(handle, buf->commit_hot, endidx)->cc);

	/*
	 * commit count read can race with concurrent OOO commit count updates.
	 * This is only needed for lib_ring_buffer_check_deliver (for
	 * non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count reaches back the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx, handle);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
					     ctx->buf_offset, commit_count,
					     ctx->slot_size, handle);
}

/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
					const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
		       == end_offset))
		return 0;
	else
		return -EPERM;
}
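
/*
 * Illustrative sketch (not part of the original header): when filling a
 * reserved slot fails mid-way, the writer may try to take the reservation
 * back; if another record has been reserved after it, the slot must be
 * committed anyway, as documented above.
 *
 *	if (lib_ring_buffer_try_discard_reserve(config, &ctx))
 *		lib_ring_buffer_commit(config, &ctx);
 */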

static inline
void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct channel *chan)
{
	uatomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct channel *chan)
{
	uatomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
				    struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf)
{
	uatomic_dec(&buf->record_disabled);
}
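
/*
 * Illustrative sketch (not part of the original header): record production can
 * be stopped temporarily at channel or buffer granularity. The disable flag is
 * a counter (uatomic_inc/uatomic_dec), so each disable must be paired with
 * exactly one enable; while it is non-zero, lib_ring_buffer_reserve() returns
 * -EAGAIN.
 *
 *	channel_record_disable(config, chan);
 *	// ... records written in this window are dropped with -EAGAIN ...
 *	channel_record_enable(config, chan);
 */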

#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */