Cleanup: libringbuffer: remove duplicate pointer chasing in slow paths
[deliverable/lttng-modules.git] / lttng-ring-buffer-client.h
1 /*
2 * lttng-ring-buffer-client.h
3 *
4 * LTTng lib ring buffer client template.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <lib/bitfield.h>
26 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
27 #include <wrapper/trace-clock.h>
28 #include <lttng-events.h>
29 #include <lttng-tracer.h>
30 #include <wrapper/ringbuffer/frontend_types.h>
31
32 #define LTTNG_COMPACT_EVENT_BITS 5
33 #define LTTNG_COMPACT_TSC_BITS 27
34
35 static struct lttng_transport lttng_relay_transport;
36
37 /*
38 * Keep the natural field alignment for _each field_ within this structure if
39 * you ever add/remove a field from this header. Packed attribute is not used
40 * because gcc generates poor code on at least powerpc and mips. Don't ever
41 * let gcc add padding between the structure elements.
42 *
43 * The guarantee we have with timestamps is that all the events in a
44 * packet are included (inclusive) within the begin/end timestamps of
45 * the packet. Another guarantee we have is that the "timestamp begin",
46 * as well as the event timestamps, are monotonically increasing (never
47 * decrease) when moving forward in a stream (physically). But this
48 * guarantee does not apply to "timestamp end", because it is sampled at
49 * commit time, which is not ordered with respect to space reservation.
50 */
51
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[16];		/* Unique ID of the trace session */
	uint32_t stream_id;		/* ID of the channel this packet belongs to */
	uint64_t stream_instance_id;	/* Per-CPU stream instance (CPU number) */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer */
		uint64_t packet_size;		/* Subbuffer size (include padding) */
		uint64_t packet_seq_num;	/* Packet sequence number */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
78
79
80 static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
81 {
82 return trace_clock_read64();
83 }
84
85 static inline
86 size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
87 {
88 int i;
89 size_t orig_offset = offset;
90
91 if (likely(!ctx))
92 return 0;
93 offset += lib_ring_buffer_align(offset, ctx->largest_align);
94 for (i = 0; i < ctx->nr_fields; i++)
95 offset += ctx->fields[i].get_size(offset);
96 return offset - orig_offset;
97 }
98
99 static inline
100 void ctx_record(struct lib_ring_buffer_ctx *bufctx,
101 struct lttng_channel *chan,
102 struct lttng_ctx *ctx)
103 {
104 int i;
105
106 if (likely(!ctx))
107 return;
108 lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
109 for (i = 0; i < ctx->nr_fields; i++)
110 ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
111 }
112
113 /*
114 * record_header_size - Calculate the header size and padding necessary.
115 * @config: ring buffer instance configuration
116 * @chan: channel
117 * @offset: offset in the write buffer
118 * @pre_header_padding: padding to add before the header (output)
119 * @ctx: reservation context
120 *
121 * Returns the event header size (including padding).
122 *
123 * The payload must itself determine its own alignment from the biggest type it
124 * contains.
125 */
static __inline__
size_t record_header_size(const struct lib_ring_buffer_config *config,
		struct channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lib_ring_buffer_ctx *ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;
	size_t orig_offset = offset;
	size_t padding;

	/*
	 * NOTE: the offset arithmetic below must mirror, byte for byte, the
	 * layout emitted by lttng_write_event_header()/_slow().
	 */
	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	/* Channel-wide context fields, then per-event context fields. */
	offset += ctx_get_size(offset, lttng_chan->ctx);
	offset += ctx_get_size(offset, event->ctx);

	*pre_header_padding = padding;
	return offset - orig_offset;
}
179
180 #include <wrapper/ringbuffer/api.h>
181
182 static
183 void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
184 struct lib_ring_buffer_ctx *ctx,
185 uint32_t event_id);
186
187 /*
188 * lttng_write_event_header
189 *
190 * Writes the event header to the offset (already aligned on 32-bits).
191 *
192 * @config: ring buffer instance configuration
193 * @ctx: reservation context
194 * @event_id: event ID
195 */
static __inline__
void lttng_write_event_header(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;

	/* Any reservation flag (full TSC, extended id) requires the slow path. */
	if (unlikely(ctx->rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		/*
		 * Pack the LTTNG_COMPACT_EVENT_BITS-bit event id and the
		 * LTTNG_COMPACT_TSC_BITS-bit truncated timestamp into a
		 * single 32-bit word.
		 */
		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		/* 16-bit id followed by an aligned 32-bit truncated timestamp. */
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	/* Channel-wide context fields first, then per-event context fields. */
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}
247
/*
 * Slow path event header write: handles the full-TSC and extended-id
 * variants. The layout written here must match record_header_size().
 */
static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
	struct lttng_event *event = lttng_probe_ctx->event;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			/*
			 * 31 is the escape value (all LTTNG_COMPACT_EVENT_BITS
			 * bits set): it tells the reader the extended header
			 * (full 32-bit id + 64-bit timestamp) follows.
			 */
			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			/*
			 * 65535 is the 16-bit escape id announcing the
			 * extended header (full 32-bit id + 64-bit timestamp).
			 */
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	/* Same context-field ordering as the fast path. */
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
315
316 static const struct lib_ring_buffer_config client_config;
317
/* Ring buffer config callback: sample the trace clock for this channel. */
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}
322
/*
 * Ring buffer config callback: forward to the template's
 * record_header_size() computation.
 */
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
		struct channel *chan, size_t offset,
		size_t *pre_header_padding,
		struct lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset,
			pre_header_padding, ctx);
}
332
333 /**
334 * client_packet_header_size - called on buffer-switch to a new sub-buffer
335 *
336 * Return header size without padding after the structure. Don't use packed
337 * structure because gcc generates inefficient code on some architectures
338 * (powerpc, mips..)
339 */
static size_t client_packet_header_size(void)
{
	/* Size up to (and excluding) the header_end marker: no trailing padding. */
	return offsetof(struct packet_header, ctx.header_end);
}
344
/*
 * Called on switch to a new sub-buffer: initialize its CTF packet header.
 * content_size/packet_size are poisoned here and only filled in by
 * client_buffer_end().
 */
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
		unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_session *session = lttng_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = lttng_chan->id;
	header->stream_instance_id = buf->backend.cpu;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL; /* for debugging */
	header->ctx.packet_size = ~0ULL;
	/* Global packet sequence: sub-buffer reuse count scaled by ring size. */
	header->ctx.packet_seq_num = chan->backend.num_subbuf * \
				     buf->backend.buf_cnt[subbuf_idx].seq_cnt + \
				     subbuf_idx;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}
370
371 /*
372 * offset is assumed to never be 0 here : never deliver a completely empty
373 * subbuffer. data_size is between 1 and subbuf_size.
374 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
		unsigned int subbuf_idx, unsigned long data_size)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	/* CTF expresses both sizes in bits, packet size page-aligned. */
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT; /* in bits */
	header->ctx.packet_size =
		(uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
	/* Aggregate all loss causes into a single discarded-events count. */
	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
395
/* Per-buffer creation hook: no client-private per-buffer state needed. */
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
		int cpu, const char *name)
{
	return 0;
}
401
/* Per-buffer teardown hook: nothing was allocated in client_buffer_create(). */
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
405
/*
 * Return the packet header of the sub-buffer currently owned by the
 * reader (read offset 0).
 */
static struct packet_header *client_packet_header(
		const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer *buf)
{
	return lib_ring_buffer_read_offset_address(&buf->backend, 0);
}
412
413 static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
414 struct lib_ring_buffer *buf,
415 uint64_t *timestamp_begin)
416 {
417 struct packet_header *header = client_packet_header(config, buf);
418 *timestamp_begin = header->ctx.timestamp_begin;
419
420 return 0;
421 }
422
423 static int client_timestamp_end(const struct lib_ring_buffer_config *config,
424 struct lib_ring_buffer *buf,
425 uint64_t *timestamp_end)
426 {
427 struct packet_header *header = client_packet_header(config, buf);
428 *timestamp_end = header->ctx.timestamp_end;
429
430 return 0;
431 }
432
433 static int client_events_discarded(const struct lib_ring_buffer_config *config,
434 struct lib_ring_buffer *buf,
435 uint64_t *events_discarded)
436 {
437 struct packet_header *header = client_packet_header(config, buf);
438 *events_discarded = header->ctx.events_discarded;
439
440 return 0;
441 }
442
443 static int client_content_size(const struct lib_ring_buffer_config *config,
444 struct lib_ring_buffer *buf,
445 uint64_t *content_size)
446 {
447 struct packet_header *header = client_packet_header(config, buf);
448 *content_size = header->ctx.content_size;
449
450 return 0;
451 }
452
453 static int client_packet_size(const struct lib_ring_buffer_config *config,
454 struct lib_ring_buffer *buf,
455 uint64_t *packet_size)
456 {
457 struct packet_header *header = client_packet_header(config, buf);
458 *packet_size = header->ctx.packet_size;
459
460 return 0;
461 }
462
463 static int client_stream_id(const struct lib_ring_buffer_config *config,
464 struct lib_ring_buffer *buf,
465 uint64_t *stream_id)
466 {
467 struct packet_header *header = client_packet_header(config, buf);
468 *stream_id = header->stream_id;
469
470 return 0;
471 }
472
473 static int client_current_timestamp(const struct lib_ring_buffer_config *config,
474 struct lib_ring_buffer *bufb,
475 uint64_t *ts)
476 {
477 *ts = config->cb.ring_buffer_clock_read(bufb->backend.chan);
478
479 return 0;
480 }
481
482 static int client_sequence_number(const struct lib_ring_buffer_config *config,
483 struct lib_ring_buffer *buf,
484 uint64_t *seq)
485 {
486 struct packet_header *header = client_packet_header(config, buf);
487
488 *seq = header->ctx.packet_seq_num;
489
490 return 0;
491 }
492
493 static
494 int client_instance_id(const struct lib_ring_buffer_config *config,
495 struct lib_ring_buffer *buf,
496 uint64_t *id)
497 {
498 struct packet_header *header = client_packet_header(config, buf);
499 *id = header->stream_instance_id;
500
501 return 0;
502 }
503
/* Ring buffer configuration for this client: callbacks plus static policy. */
static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	/* Number of TSC bits stored in the compact event header. */
	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_OUTPUT_TEMPLATE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
523
/*
 * Drop the module reference taken in _channel_create() once the channel
 * backend no longer uses lttng_relay_transport.ops.
 */
static
void release_priv_ops(void *priv_ops)
{
	module_put(THIS_MODULE);
}
529
/* Transport op: tear down a channel created by _channel_create(). */
static
void lttng_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}
535
/*
 * Transport op: create a channel with this client's configuration.
 * On success, a reference on this module is held so the transport ops
 * stay valid for the channel's lifetime; it is released through
 * release_priv_ops(). Returns NULL on failure.
 */
static
struct channel *_channel_create(const char *name,
				struct lttng_channel *lttng_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	struct channel *chan;

	chan = channel_create(&client_config, name, lttng_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
	if (chan) {
		/*
		 * Ensure this module is not unloaded before we finish
		 * using lttng_relay_transport.ops.
		 */
		if (!try_module_get(THIS_MODULE)) {
			printk(KERN_WARNING "LTT : Can't lock transport module.\n");
			goto error;
		}
		chan->backend.priv_ops = &lttng_relay_transport.ops;
		chan->backend.release_priv_ops = release_priv_ops;
	}
	return chan;

error:
	lttng_channel_destroy(chan);
	return NULL;
}
566
567 static
568 struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
569 {
570 struct lib_ring_buffer *buf;
571 int cpu;
572
573 for_each_channel_cpu(cpu, chan) {
574 buf = channel_get_ring_buffer(&client_config, chan, cpu);
575 if (!lib_ring_buffer_open_read(buf))
576 return buf;
577 }
578 return NULL;
579 }
580
581 static
582 int lttng_buffer_has_read_closed_stream(struct channel *chan)
583 {
584 struct lib_ring_buffer *buf;
585 int cpu;
586
587 for_each_channel_cpu(cpu, chan) {
588 buf = channel_get_ring_buffer(&client_config, chan, cpu);
589 if (!atomic_long_read(&buf->active_readers))
590 return 1;
591 }
592 return 0;
593 }
594
/* Transport op: release the read-side reference taken by read_open. */
static
void lttng_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
600
601 static
602 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
603 uint32_t event_id)
604 {
605 struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
606 int ret, cpu;
607
608 cpu = lib_ring_buffer_get_cpu(&client_config);
609 if (cpu < 0)
610 return -EPERM;
611 ctx->cpu = cpu;
612
613 switch (lttng_chan->header_type) {
614 case 1: /* compact */
615 if (event_id > 30)
616 ctx->rflags |= LTTNG_RFLAG_EXTENDED;
617 break;
618 case 2: /* large */
619 if (event_id > 65534)
620 ctx->rflags |= LTTNG_RFLAG_EXTENDED;
621 break;
622 default:
623 WARN_ON_ONCE(1);
624 }
625
626 ret = lib_ring_buffer_reserve(&client_config, ctx);
627 if (ret)
628 goto put;
629 lttng_write_event_header(&client_config, ctx, event_id);
630 return 0;
631 put:
632 lib_ring_buffer_put_cpu(&client_config);
633 return ret;
634 }
635
/*
 * Transport op: commit the reserved slot and release the CPU pinned by
 * lttng_event_reserve().
 */
static
void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
642
/* Transport op: copy @len bytes of kernel memory into the reserved slot. */
static
void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

/* Transport op: copy @len bytes from userspace (atomic context safe). */
static
void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
				 const void __user *src, size_t len)
{
	lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
}

/* Transport op: fill @len bytes of the reserved slot with byte @c. */
static
void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
		int c, size_t len)
{
	lib_ring_buffer_memset(&client_config, ctx, c, len);
}

/* Transport op: copy a kernel string, padding with '#' up to @len. */
static
void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
		size_t len)
{
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}

/* Transport op: copy a userspace string, padding with '#' up to @len. */
static
void lttng_event_strcpy_from_user(struct lib_ring_buffer_ctx *ctx,
		const char __user *src, size_t len)
{
	lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
			len, '#');
}
678
/* Transport op: wait queue woken when the given per-CPU buffer has data. */
static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
	struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
					chan, cpu);
	return &buf->write_wait;
}
686
/* Transport op: wait queue used for CPU hotplug notifications. */
static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}

/* Transport op: nonzero once the channel has been finalized. */
static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

/* Transport op: nonzero when the channel is disabled. */
static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}
704
/* Transport descriptor registered with LTTng core at module init. */
static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.buffer_read_open = lttng_buffer_read_open,
		.buffer_has_read_closed_stream =
			lttng_buffer_has_read_closed_stream,
		.buffer_read_close = lttng_buffer_read_close,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.event_write_from_user = lttng_event_write_from_user,
		.event_memset = lttng_event_memset,
		.event_strcpy = lttng_event_strcpy,
		.event_strcpy_from_user = lttng_event_strcpy_from_user,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
		.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
		.timestamp_begin = client_timestamp_begin,
		.timestamp_end = client_timestamp_end,
		.events_discarded = client_events_discarded,
		.content_size = client_content_size,
		.packet_size = client_packet_size,
		.stream_id = client_stream_id,
		.current_timestamp = client_current_timestamp,
		.sequence_number = client_sequence_number,
		.instance_id = client_instance_id,
	},
};
738
/* Module entry point: register this ring buffer client with LTTng core. */
static int __init lttng_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when it is built as a module into LTTng.
	 */
	wrapper_vmalloc_sync_all();
	lttng_transport_register(&lttng_relay_transport);
	return 0;
}

module_init(lttng_ring_buffer_client_init);

/* Module exit point: unregister the transport before unload. */
static void __exit lttng_ring_buffer_client_exit(void)
{
	lttng_transport_unregister(&lttng_relay_transport);
}

module_exit(lttng_ring_buffer_client_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
		   " client");
This page took 0.065842 seconds and 6 git commands to generate.