Update ring buffer and pretty print
[deliverable/lttng-modules.git] / lib / ringbuffer / config.h
1 #ifndef _LINUX_RING_BUFFER_CONFIG_H
2 #define _LINUX_RING_BUFFER_CONFIG_H
3
4 /*
5 * linux/ringbuffer/config.h
6 *
7 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring buffer configuration header. Note: after declaring the standard inline
10 * functions, clients should also include linux/ringbuffer/api.h.
11 *
12 * Dual LGPL v2.1/GPL v2 license.
13 */
14
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>
17
18 struct lib_ring_buffer;
19 struct channel;
20 struct lib_ring_buffer_config;
21 struct lib_ring_buffer_ctx;
22
23 /*
24 * Ring buffer client callbacks. Only used by slow path, never on fast path.
25 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
26 * provided as inline functions too. These may simply return 0 if not used by
27 * the client.
28 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/*
	 * Read the trace clock. A static inline version is also required
	 * for the fast path.
	 */
	u64 (*ring_buffer_clock_read) (struct channel *chan);
	/*
	 * Compute the size of the record header for a record written at
	 * @offset, returning the alignment padding to insert before the
	 * header through @pre_header_padding. A static inline version is
	 * also required for the fast path.
	 */
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lib_ring_buffer_ctx *ctx);

	/* Slow path only, at subbuffer switch */

	/* Size of the header written at the start of each sub-buffer. */
	size_t (*subbuffer_header_size) (void);
	/* Write the sub-buffer header when sub-buffer @subbuf_idx is opened. */
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	/*
	 * Finalize the header of sub-buffer @subbuf_idx when it is closed;
	 * @data_size is the number of payload bytes it contains.
	 */
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};
67
68 /*
69 * Ring buffer instance configuration.
70 *
71 * Declare as "static const" within the client object to ensure the inline fast
72 * paths can be optimized.
73 *
74 * alloc/sync pairs:
75 *
76 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
77 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
78 * with preemption disabled (lib_ring_buffer_get_cpu() and
79 * lib_ring_buffer_put_cpu()).
80 *
81 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
82 * Per-cpu buffer with global synchronization. Tracing can be performed with
83 * preemption enabled, statistically stays on the local buffers.
84 *
85 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
86 * Should only be used for buffers belonging to a single thread or protected
87 * by mutual exclusion by the client. Note that periodical sub-buffer switch
88 * should be disabled in this kind of configuration.
89 *
90 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
91 * Global shared buffer with global synchronization.
92 *
93 * wakeup:
94 *
95 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
96 * buffers and wake up readers if data is ready. Mainly useful for tracers which
97 * don't want to call into the wakeup code on the tracing path. Use in
98 * combination with "read_timer_interval" channel_create() argument.
99 *
100 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
101 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
102 * for drivers.
103 *
104 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
105 * has the responsibility to perform wakeups.
106 */
struct lib_ring_buffer_config {
	/* Buffer allocation scheme. See the "alloc/sync pairs" rules above. */
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	/* Writer synchronization scheme. See the "alloc/sync pairs" rules above. */
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	/* Behavior when a sub-buffer fills up while unread. */
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	/* How readers consume the buffer contents. */
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	/* Backing memory for the buffers. */
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	/* Whether buffer state must stay readable after a kernel oops. */
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	/* Whether IPIs are used to issue memory barriers across CPUs. */
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	/* Reader wakeup policy. See the "wakeup" description above. */
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	/* Client callbacks (see struct lib_ring_buffer_client_cb). */
	struct lib_ring_buffer_client_cb cb;
};
155
156 /*
157 * ring buffer context
158 *
159 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
160 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
161 * lib_ring_buffer_write().
162 */
struct lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags (RING_BUFFER_RFLAG_*) */
};
190
191 /**
192 * lib_ring_buffer_ctx_init - initialize ring buffer context
193 * @ctx: ring buffer context to initialize
194 * @chan: channel
195 * @priv: client private data
196 * @data_size: size of record data payload
197 * @largest_align: largest alignment within data payload types
198 * @cpu: processor id
199 */
200 static inline
201 void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
202 struct channel *chan, void *priv,
203 size_t data_size, int largest_align,
204 int cpu)
205 {
206 ctx->chan = chan;
207 ctx->priv = priv;
208 ctx->data_size = data_size;
209 ctx->largest_align = largest_align;
210 ctx->cpu = cpu;
211 ctx->rflags = 0;
212 }
213
214 /*
215 * Reservation flags.
216 *
217 * RING_BUFFER_RFLAG_FULL_TSC
218 *
219 * This flag is passed to record_header_size() and to the primitive used to
220 * write the record header. It indicates that the full 64-bit time value is
221 * needed in the record header. If this flag is not set, the record header needs
222 * only to contain "tsc_bits" bit of time value.
223 *
224 * Reservation flags can be added by the client, starting from
225 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
226 * record_header_size() to lib_ring_buffer_write_record_header().
227 */
228 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
229 #define RING_BUFFER_RFLAG_END (1U << 1)
230
231 /*
232 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
233 * compile-time. We have to duplicate the "config->align" information and the
234 * definition here because config->align is used both in the slow and fast
235 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
236 */
237 #ifdef RING_BUFFER_ALIGN
238
239 # define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
240
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 *
 * Returns the number of padding bytes to add to @align_drift so the
 * following field of size @size_of_type is naturally aligned.
 * offset_align() is presumably provided by the ring buffer wrapper
 * headers — confirm against wrapper/ringbuffer includes.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}
250
251 #else
252
253 # define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
254
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 *
 * RING_BUFFER_ALIGN is not defined, so event headers are packed
 * (RING_BUFFER_ALIGN_ATTR above) and no alignment padding is ever
 * needed: always return 0. Both parameters are intentionally unused
 * in this variant; they exist to match the aligned variant's signature.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}
264
265 #endif
266
267 /**
268 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
269 * @ctx: ring buffer context.
270 */
271 static inline
272 void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
273 size_t alignment)
274 {
275 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
276 alignment);
277 }
278
279 /*
280 * lib_ring_buffer_check_config() returns 0 on success.
281 * Used internally to check for valid configurations at channel creation.
282 */
283 static inline
284 int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
285 unsigned int switch_timer_interval,
286 unsigned int read_timer_interval)
287 {
288 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
289 && config->sync == RING_BUFFER_SYNC_PER_CPU
290 && switch_timer_interval)
291 return -EINVAL;
292 return 0;
293 }
294
295 #include "../../wrapper/ringbuffer/vatomic.h"
296
297 #endif /* _LINUX_RING_BUFFER_CONFIG_H */
This page took 0.038575 seconds and 5 git commands to generate.