/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * lib/ringbuffer/frontend_api.h
 *
 * Ring Buffer Library Synchronization Header (buffer write API).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
 */

#ifndef _LIB_RING_BUFFER_FRONTEND_API_H
#define _LIB_RING_BUFFER_FRONTEND_API_H

#include <wrapper/ringbuffer/frontend.h>
#include <wrapper/percpu-defs.h>
#include <linux/errno.h>
#include <linux/prefetch.h>

/**
 * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
 *
 * Disables preemption (acts as an RCU read-side critical section) and keeps a
 * ring buffer nesting count as a supplementary safety net to ensure tracer
 * client code will never trigger an endless recursion. Returns the processor
 * ID on success, -EPERM on failure (nesting count too high).
 *
 * asm volatile and "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count section. This is required
 * to ensure that probe side-effects which can cause recursion (e.g. unforeseen
 * traps, divisions by 0, ...) are triggered within the incremented nesting
 * count section.
 */
static inline
int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
{
	int cpu, nesting;

	rcu_read_lock_sched_notrace();
	cpu = smp_processor_id();
	nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
	barrier();

	if (unlikely(nesting > RING_BUFFER_MAX_NESTING)) {
		WARN_ON_ONCE(1);
		per_cpu(lib_ring_buffer_nesting, cpu)--;
		rcu_read_unlock_sched_notrace();
		return -EPERM;
	} else
		return cpu;
}

/**
 * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
 */
static inline
void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
{
	barrier();
	(*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
	rcu_read_unlock_sched_notrace();
}
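
/*
 * Example (illustrative sketch, not part of the API): tracer client code
 * brackets each reserve/commit sequence with lib_ring_buffer_get_cpu() and
 * lib_ring_buffer_put_cpu(). A negative return value means the nesting count
 * is too high and the event should be dropped. The event_record() helper
 * below is a stand-in for client-specific reserve/write/commit code.
 *
 *	int cpu;
 *
 *	cpu = lib_ring_buffer_get_cpu(config);
 *	if (cpu < 0)
 *		return;
 *	event_record(config, chan, cpu);
 *	lib_ring_buffer_put_cpu(config);
 */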

/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
 * part of the API per se.
 *
 * Returns 0 if the reserve succeeded, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_ctx *ctx,
				void *client_ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf = ctx->buf;
	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

	ctx->tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and the commit seq value to compare
	 * it to the commit counter.
	 */
	prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;

	if (unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->slot_size = record_header_size(config, chan, *o_begin,
					    before_hdr_pad, ctx, client_ctx);
	ctx->slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->slot_size,
				      ctx->largest_align) + ctx->data_size;
	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
		     > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->slot_size;

	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}

/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context (input and output). Must already be initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
 *
 * Return:
 *	0 on success.
 *	-EAGAIN if channel is disabled.
 *	-ENOSPC if event size is too large for packet.
 *	-ENOBUFS if there is currently not enough space in buffer for the event.
 *	-EIO if data cannot be written into the buffer for any other reason.
 */

static inline
int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    void *client_ctx)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (unlikely(atomic_read(&chan->record_disabled)))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
	else
		buf = chan->backend.buf;
	if (unlikely(atomic_read(&buf->record_disabled)))
		return -EAGAIN;
	ctx->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
						 &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
		     != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, ctx->buf, ctx->tsc);

	/*
	 * Push the reader if necessary.
	 */
	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
				    subbuf_index(o_end - 1, chan));

	ctx->pre_offset = o_begin;
	ctx->buf_offset = o_begin + before_hdr_pad;
	return 0;
slow_path:
	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
}
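
/*
 * Example (sketch): the context must be initialized before calling
 * lib_ring_buffer_reserve(). The lib_ring_buffer_ctx_init() helper, the
 * "priv", "client_ctx" and "payload" variables and lttng_alignof() are
 * client-side assumptions, not definitions from this header. A non-zero
 * return code (-EAGAIN, -ENOSPC, -ENOBUFS or -EIO) means nothing was
 * reserved, so nothing must be committed.
 *
 *	struct lib_ring_buffer_ctx ctx;
 *	int ret;
 *
 *	lib_ring_buffer_ctx_init(&ctx, chan, priv, sizeof(payload),
 *				 lttng_alignof(payload), cpu);
 *	ret = lib_ring_buffer_reserve(config, &ctx, client_ctx);
 *	if (ret)
 *		goto error;
 */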

/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * must be executed locally for per-CPU buffers, this function must be called
 * from the CPU which owns the buffer for an ACTIVE flush, with preemption
 * disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer *buf, enum switch_mode mode)
{
	lib_ring_buffer_switch_slow(buf, mode);
}
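
/*
 * Example (sketch, assuming RING_BUFFER_SYNC_PER_CPU and per-CPU allocation):
 * flushing the buffer owned by the local CPU, with preemption disabled across
 * the call as required above (get_cpu() disables preemption, put_cpu()
 * re-enables it). "config" and "chan" are assumed to come from client code.
 *
 *	struct lib_ring_buffer *buf;
 *	int cpu;
 *
 *	cpu = get_cpu();
 *	buf = per_cpu_ptr(chan->backend.buf, cpu);
 *	lib_ring_buffer_switch(config, buf, SWITCH_FLUSH);
 *	put_cpu();
 */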

/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */

/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
			    const struct lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf = ctx->buf;
	unsigned long offset_end = ctx->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;
	struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, &buf->backend, endidx);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
		/*
		 * Must write slot data before incrementing commit count. This
		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
		 * by get_subbuf().
		 */
		barrier();
	} else
		smp_wmb();

	v_add(config, ctx->slot_size, &cc_hot->cc);

	/*
	 * The commit count read can race with concurrent OOO commit count
	 * updates. This is only needed for lib_ring_buffer_check_deliver (for
	 * non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is interested in the fact that the commit
	 *   count catches up to the reserve offset for a specific sub-buffer,
	 *   which is completely independent of the order.
	 */
	commit_count = v_read(config, &cc_hot->cc);

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx, ctx->tsc);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan,
					     offset_end, commit_count, cc_hot);
}
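
/*
 * Example (sketch): once lib_ring_buffer_reserve() has returned 0, the client
 * writes its payload into the reserved slot and commits it. The
 * lib_ring_buffer_write() helper is assumed to be provided by the backend
 * API; "payload" is a client-side placeholder.
 *
 *	lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(config, &ctx);
 *	lib_ring_buffer_put_cpu(config);
 */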

/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context. (input arguments only)
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
					const struct lib_ring_buffer_ctx *ctx)
{
	struct lib_ring_buffer *buf = ctx->buf;
	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
		   != end_offset))
		return -EPERM;
	else
		return 0;
}
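
/*
 * Example (sketch): discarding a reserved record when the client decides,
 * after reservation, that the event should not be recorded. If the discard
 * fails, the record must still be committed. The events_discarded counter is
 * a hypothetical client-side statistic.
 *
 *	if (lib_ring_buffer_try_discard_reserve(config, &ctx))
 *		lib_ring_buffer_commit(config, &ctx);
 *	else
 *		events_discarded++;
 *	lib_ring_buffer_put_cpu(config);
 */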

static inline
void channel_record_disable(const struct lib_ring_buffer_config *config,
			    struct channel *chan)
{
	atomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(const struct lib_ring_buffer_config *config,
			   struct channel *chan)
{
	atomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
				    struct lib_ring_buffer *buf)
{
	atomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf)
{
	atomic_dec(&buf->record_disabled);
}
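
/*
 * Example (sketch): temporarily stopping record writes on a whole channel.
 * The disable count is a nesting counter, so each disable must be paired with
 * an enable; lib_ring_buffer_reserve() returns -EAGAIN while the count is
 * non-zero.
 *
 *	channel_record_disable(config, chan);
 *	(... reserve attempts fail with -EAGAIN here ...)
 *	channel_record_enable(config, chan);
 */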

#endif /* _LIB_RING_BUFFER_FRONTEND_API_H */