Use the memory pool instead of kmalloc
/*
 * lttng-tracepoint-event-impl.h
 *
 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/rculist.h>
#include <asm/byteorder.h>
#include <linux/swab.h>

#include <probes/lttng.h>
#include <probes/lttng-types.h>
#include <probes/lttng-probe-user.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/frontend_types.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/rcu.h>
#include <lttng-events.h>
#include <lttng-tracer-core.h>
#include <lttng-tp-mempool.h>

#define __LTTNG_NULL_STRING	"(null)"

/*
 * Macro declarations used for all stages.
 */

/*
 * LTTng name mapping macros. LTTng remaps some of the kernel events to
 * enforce name-spacing.
 */
#undef LTTNG_TRACEPOINT_EVENT_MAP
#define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS(map, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(fields)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \
			PARAMS(fields)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)

#undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
#define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(_locvar), \
			PARAMS(_code_pre), \
			PARAMS(fields), \
			PARAMS(_code_post)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_CODE
#define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(_locvar), \
			PARAMS(_code_pre), \
			PARAMS(fields), \
			PARAMS(_code_post))

/*
 * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function
 * handler for events. That is, when all events share the same parameters
 * and just have distinct tracepoints. Each tracepoint can then be defined
 * with LTTNG_TRACEPOINT_EVENT_INSTANCE, which maps the
 * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
 *
 * LTTNG_TRACEPOINT_EVENT is a one-to-one mapping between a tracepoint and
 * a template.
 */
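
/*
 * Hypothetical usage sketch (not part of this header): an instrumentation
 * header would typically declare a class shared by several events roughly
 * as follows. The names my_class, my_event_a and my_event_b are invented
 * for illustration.
 *
 *	LTTNG_TRACEPOINT_EVENT_CLASS(my_class,
 *		TP_PROTO(int cpu, unsigned long count),
 *		TP_ARGS(cpu, count),
 *		TP_FIELDS(
 *			ctf_integer(int, cpu, cpu)
 *			ctf_integer(unsigned long, count, count)
 *		)
 *	)
 *
 *	LTTNG_TRACEPOINT_EVENT_INSTANCE(my_class, my_event_a,
 *		TP_PROTO(int cpu, unsigned long count),
 *		TP_ARGS(cpu, count))
 *	LTTNG_TRACEPOINT_EVENT_INSTANCE(my_class, my_event_b,
 *		TP_PROTO(int cpu, unsigned long count),
 *		TP_ARGS(cpu, count))
 */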

#undef LTTNG_TRACEPOINT_EVENT
#define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \
	LTTNG_TRACEPOINT_EVENT_MAP(name, name, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(fields))

#undef LTTNG_TRACEPOINT_EVENT_NOARGS
#define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \
	LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE
#define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)

#undef LTTNG_TRACEPOINT_EVENT_CLASS
#define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
			PARAMS(_fields), )

#undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )

/*
 * Stage 1 of the trace events.
 *
 * Create dummy trace calls for each event, verifying that the LTTng module
 * instrumentation headers match the kernel arguments. These are optimized
 * out by the compiler.
 */
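
/*
 * For illustration, with the hypothetical my_event_a instance sketched
 * above, this stage merely emits a prototype such as
 *
 *	void trace_my_event_a(int cpu, unsigned long count);
 *
 * which clashes at compile time with the kernel's own trace_my_event_a()
 * declaration if the argument lists have drifted apart.
 */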

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void trace_##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void trace_##_name(void);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 1.1 of the trace events.
 *
 * Create dummy trace prototypes for each event class, and for each used
 * template. This will allow checking whether the prototypes from the
 * class and the instance using the class actually match.
 */

#include <probes/lttng-events-reset.h>	/* Reset all macros within TRACE_EVENT */

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void __event_template_proto___##_template(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void __event_template_proto___##_template(void);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
void __event_template_proto___##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
void __event_template_proto___##_name(void);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 1.2 of tracepoint event generation
 *
 * Unfolding the enums
 */
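
/*
 * Hypothetical usage sketch: an enumeration declared in an instrumentation
 * header (names invented for illustration). This stage expands it into the
 * __enum_values__my_state[] entry array defined below.
 *
 *	LTTNG_TRACEPOINT_ENUM(my_state,
 *		TP_ENUM_VALUES(
 *			ctf_enum_value("IDLE", 0)
 *			ctf_enum_range("BUSY", 1, 7)
 *			ctf_enum_auto("UNKNOWN")
 *		)
 *	)
 */
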
#include <probes/lttng-events-reset.h>	/* Reset all macros within TRACE_EVENT */

/* Enumeration entry (single value) */
#undef ctf_enum_value
#define ctf_enum_value(_string, _value) \
	{ \
		.start = { \
			.signedness = lttng_is_signed_type(__typeof__(_value)), \
			.value = lttng_is_signed_type(__typeof__(_value)) ? \
				(long long) (_value) : (_value), \
		}, \
		.end = { \
			.signedness = lttng_is_signed_type(__typeof__(_value)), \
			.value = lttng_is_signed_type(__typeof__(_value)) ? \
				(long long) (_value) : (_value), \
		}, \
		.string = (_string), \
	},

/* Enumeration entry (range) */
#undef ctf_enum_range
#define ctf_enum_range(_string, _range_start, _range_end) \
	{ \
		.start = { \
			.signedness = lttng_is_signed_type(__typeof__(_range_start)), \
			.value = lttng_is_signed_type(__typeof__(_range_start)) ? \
				(long long) (_range_start) : (_range_start), \
		}, \
		.end = { \
			.signedness = lttng_is_signed_type(__typeof__(_range_end)), \
			.value = lttng_is_signed_type(__typeof__(_range_end)) ? \
				(long long) (_range_end) : (_range_end), \
		}, \
		.string = (_string), \
	},

/* Enumeration entry (automatic value; follows the rules of CTF) */
#undef ctf_enum_auto
#define ctf_enum_auto(_string) \
	{ \
		.start = { \
			.signedness = -1, \
			.value = -1, \
		}, \
		.end = { \
			.signedness = -1, \
			.value = -1, \
		}, \
		.string = (_string), \
		.options = { \
			.is_auto = 1, \
		} \
	},

#undef TP_ENUM_VALUES
#define TP_ENUM_VALUES(...) \
	__VA_ARGS__

#undef LTTNG_TRACEPOINT_ENUM
#define LTTNG_TRACEPOINT_ENUM(_name, _values) \
	const struct lttng_enum_entry __enum_values__##_name[] = { \
		_values \
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Create the event field type metadata section.
 * Each event produces an array of fields.
 */
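
/*
 * Sketch of the expansion for the hypothetical my_class above (simplified;
 * the .type initializers are elided):
 *
 *	static const struct lttng_event_field __event_fields___my_class[] = {
 *		{ .name = "cpu", .type = ..., .nowrite = 0, .user = 0, },
 *		{ .name = "count", .type = ..., .nowrite = 0, .user = 0, },
 *	};
 */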

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>
#include <probes/lttng-events-nowrite.h>

#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	{ \
		.name = #_item, \
		.type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none), \
		.nowrite = _nowrite, \
		.user = _user, \
	},

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	{ \
		.name = #_item, \
		.type = \
		{ \
			.atype = atype_array, \
			.u = \
			{ \
				.array = \
				{ \
					.elem_type = __type_integer(_type, 0, 0, 0, __BYTE_ORDER, 10, _encoding), \
					.length = _length, \
				} \
			} \
		}, \
		.nowrite = _nowrite, \
		.user = _user, \
	},

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	{ \
		.name = #_item, \
		.type = \
		{ \
			.atype = atype_array, \
			.u = \
			{ \
				.array = \
				{ \
					.elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \
					.length = (_length) * sizeof(_type) * CHAR_BIT, \
					.elem_alignment = lttng_alignof(_type), \
				} \
			} \
		}, \
		.nowrite = _nowrite, \
		.user = _user, \
	},

#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, \
			_length_type, _src_length, _encoding, \
			_byte_order, _base, _user, _nowrite) \
	{ \
		.name = #_item, \
		.type = \
		{ \
			.atype = atype_sequence, \
			.u = \
			{ \
				.sequence = \
				{ \
					.length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \
					.elem_type = __type_integer(_type, 0, 0, -1, _byte_order, _base, _encoding), \
				}, \
			}, \
		}, \
		.nowrite = _nowrite, \
		.user = _user, \
	},

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src, \
			_length_type, _src_length, \
			_user, _nowrite) \
	{ \
		.name = #_item, \
		.type = \
		{ \
			.atype = atype_sequence, \
			.u = \
			{ \
				.sequence = \
				{ \
					.length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \
					.elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \
					.elem_alignment = lttng_alignof(_type), \
				}, \
			}, \
		}, \
		.nowrite = _nowrite, \
		.user = _user, \
	},

#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite) \
	{ \
		.name = #_item, \
		.type = \
		{ \
			.atype = atype_string, \
			.u = \
			{ \
				.basic = { .string = { .encoding = lttng_encode_UTF8 } } \
			}, \
		}, \
		.nowrite = _nowrite, \
		.user = _user, \
	},

#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	{ \
		.name = #_item, \
		.type = { \
			.atype = atype_enum, \
			.u = { \
				.basic = { \
					.enumeration = { \
						.desc = &__enum_##_name, \
						.container_type = { \
							.size = sizeof(_type) * CHAR_BIT, \
							.alignment = lttng_alignof(_type) * CHAR_BIT, \
							.signedness = lttng_is_signed_type(_type), \
							.reverse_byte_order = 0, \
							.base = 10, \
							.encoding = lttng_encode_none, \
						}, \
					}, \
				}, \
			}, \
		}, \
		.nowrite = _nowrite, \
		.user = _user, \
	},

#undef ctf_custom_field
#define ctf_custom_field(_type, _item, _code) \
	{ \
		.name = #_item, \
		.type = { _type }, \
		.nowrite = 0, \
		.user = 0, \
	},

#undef ctf_custom_type
#define ctf_custom_type(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__	/* Only one used in this phase */

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
	static const struct lttng_event_field __event_fields___##_name[] = { \
		_fields \
	};

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)

#undef LTTNG_TRACEPOINT_ENUM
#define LTTNG_TRACEPOINT_ENUM(_name, _values) \
	static const struct lttng_enum_desc __enum_##_name = { \
		.name = #_name, \
		.entries = __enum_values__##_name, \
		.nr_entries = ARRAY_SIZE(__enum_values__##_name), \
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Create probe callback prototypes.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data, _proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Create static inline function that calculates event size.
 */
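
/*
 * Sketch of the expansion for the hypothetical my_class above: the
 * generated function accumulates alignment padding plus the size of each
 * field.
 *
 *	static inline ssize_t __event_get_size__my_class(void *__tp_locvar,
 *			int cpu, unsigned long count)
 *	{
 *		size_t __event_len = 0;
 *
 *		__event_len += lib_ring_buffer_align(__event_len,
 *				lttng_alignof(int));
 *		__event_len += sizeof(int);
 *		__event_len += lib_ring_buffer_align(__event_len,
 *				lttng_alignof(unsigned long));
 *		__event_len += sizeof(unsigned long);
 *		return __event_len;
 *	}
 *
 * Dynamic-length fields (sequences, strings) additionally push their
 * length on the per-CPU lttng_dynamic_len_stack, which stage 6 consumes.
 */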

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>

#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__event_len += sizeof(_type);

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__event_len += sizeof(_type) * (_length);

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)

#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
	__event_len += sizeof(_length_type); \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	{ \
		size_t __seqlen = (_src_length); \
		\
		if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
			goto error; \
		barrier();	/* reserve before use. */ \
		this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = __seqlen; \
		__event_len += sizeof(_type) * __seqlen; \
	}

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src, \
			_length_type, _src_length, \
			_user, _nowrite) \
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
			none, __LITTLE_ENDIAN, 10, _user, _nowrite)

/*
 * ctf_user_string includes the terminating \0. If lttng_strlen_user_inatomic()
 * returns 0, the access faulted, so we set the size to 1 (\0 only).
 */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite) \
	if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
		goto error; \
	barrier();	/* reserve before use. */ \
	if (_user) { \
		__event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
			max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \
	} else { \
		__event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
			strlen((_src) ? (_src) : __LTTNG_NULL_STRING) + 1; \
	}

#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

#undef ctf_align
#define ctf_align(_type) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type));

#undef ctf_custom_field
#define ctf_custom_field(_type, _item, _code) \
	{ \
		_code \
	}

#undef ctf_custom_code
#define ctf_custom_code(...)	__VA_ARGS__

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline ssize_t __event_get_size__##_name(void *__tp_locvar, _proto) \
{ \
	size_t __event_len = 0; \
	unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
	\
	_fields \
	return __event_len; \
	\
error: \
	__attribute__((unused)); \
	return -1; \
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline ssize_t __event_get_size__##_name(void *__tp_locvar) \
{ \
	size_t __event_len = 0; \
	unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
	\
	_fields \
	return __event_len; \
	\
error: \
	__attribute__((unused)); \
	return -1; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4.1 of tracepoint event generation.
 *
 * Create a static inline function that lays out the filter stack data.
 * We make both write and nowrite data available to the filter.
 */
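
/*
 * Layout sketch of the resulting filter stack data, per field kind:
 *
 *	integer/enum:	[ 8-byte int64_t or uint64_t value ]
 *	array/sequence:	[ unsigned long length ][ const void *pointer ]
 *	string:		[ const char *pointer ]
 *
 * Each field therefore occupies at most 2 * sizeof(unsigned long), which
 * is the worst case used to size __filter_stack_data in stage 6 below.
 */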

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>
#include <probes/lttng-events-nowrite.h>

#undef _ctf_integer_ext_fetched
#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
	if (lttng_is_signed_type(_type)) { \
		int64_t __ctf_tmp_int64; \
		switch (sizeof(_type)) { \
		case 1: \
		{ \
			union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
			__ctf_tmp_int64 = (int64_t) __tmp.v; \
			break; \
		} \
		case 2: \
		{ \
			union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER) \
				__swab16s(&__tmp.v); \
			__ctf_tmp_int64 = (int64_t) __tmp.v; \
			break; \
		} \
		case 4: \
		{ \
			union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER) \
				__swab32s(&__tmp.v); \
			__ctf_tmp_int64 = (int64_t) __tmp.v; \
			break; \
		} \
		case 8: \
		{ \
			union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER) \
				__swab64s(&__tmp.v); \
			__ctf_tmp_int64 = (int64_t) __tmp.v; \
			break; \
		} \
		default: \
			BUG_ON(1); \
		}; \
		memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
	} else { \
		uint64_t __ctf_tmp_uint64; \
		switch (sizeof(_type)) { \
		case 1: \
		{ \
			union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v; \
			break; \
		} \
		case 2: \
		{ \
			union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER) \
				__swab16s(&__tmp.v); \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v; \
			break; \
		} \
		case 4: \
		{ \
			union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER) \
				__swab32s(&__tmp.v); \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v; \
			break; \
		} \
		case 8: \
		{ \
			union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER) \
				__swab64s(&__tmp.v); \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v; \
			break; \
		} \
		default: \
			BUG_ON(1); \
		}; \
		memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
	} \
	__stack_data += sizeof(int64_t);

#undef _ctf_integer_ext_isuser0
#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
	_ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)

#undef _ctf_integer_ext_isuser1
#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
{ \
	union { \
		char __array[sizeof(_user_src)]; \
		__typeof__(_user_src) __v; \
	} __tmp_fetch; \
	if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
				&(_user_src), sizeof(_user_src))) \
		memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
	_ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
}

#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
	_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	{ \
		unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
		const void *__ctf_tmp_ptr = (_src); \
		memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
		__stack_data += sizeof(unsigned long); \
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
		__stack_data += sizeof(void *); \
	}

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)

#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	{ \
		unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
		const void *__ctf_tmp_ptr = (_src); \
		memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
		__stack_data += sizeof(unsigned long); \
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
		__stack_data += sizeof(void *); \
	}

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src, \
			_length_type, _src_length, \
			_user, _nowrite) \
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
			none, __LITTLE_ENDIAN, 10, _user, _nowrite)

#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite) \
	{ \
		const void *__ctf_tmp_ptr = \
			((_src) ? (_src) : __LTTNG_NULL_STRING); \
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
		__stack_data += sizeof(void *); \
	}

#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline \
void __event_prepare_filter_stack__##_name(char *__stack_data, \
		void *__tp_locvar) \
{ \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
	\
	_fields \
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline \
void __event_prepare_filter_stack__##_name(char *__stack_data, \
		void *__tp_locvar, _proto) \
{ \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
	\
	_fields \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 5 of the trace events.
 *
 * Create static inline function that calculates event payload alignment.
 */
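
/*
 * Sketch of the expansion for the hypothetical my_class above: the
 * generated function reduces to a max over the field alignments.
 *
 *	static inline size_t __event_get_align__my_class(void *__tp_locvar,
 *			int cpu, unsigned long count)
 *	{
 *		size_t __event_align = 1;
 *
 *		__event_align = max_t(size_t, __event_align,
 *				lttng_alignof(int));
 *		__event_align = max_t(size_t, __event_align,
 *				lttng_alignof(unsigned long));
 *		return __event_align;
 *	}
 */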

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>

#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)

#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src, \
			_length_type, _src_length, \
			_user, _nowrite) \
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
			none, __LITTLE_ENDIAN, 10, _user, _nowrite)

#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)

#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

#undef ctf_align
#define ctf_align(_type) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

#undef ctf_custom_field
#define ctf_custom_field(_type, _item, _code)	_code

#undef ctf_custom_code
#define ctf_custom_code(...) \
	{ \
		__VA_ARGS__ \
	}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \
{ \
	size_t __event_align = 1; \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
	\
	_fields \
	return __event_align; \
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline size_t __event_get_align__##_name(void *__tp_locvar) \
{ \
	size_t __event_align = 1; \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
	\
	_fields \
	return __event_align; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 6 of tracepoint event generation.
 *
 * Create the probe function. This function computes the event size,
 * reserves ring buffer space, and writes the event data into the buffer.
 */
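
/*
 * Outline of the control flow implemented by the probe bodies generated
 * below:
 *
 *	1. Bail out early if the session, channel or event is disabled, or
 *	   if the PID tracker filters out the current process.
 *	2. Run _code_pre, then evaluate any attached filter bytecode on the
 *	   stack data prepared by __event_prepare_filter_stack__*().
 *	3. Compute the event size and alignment, reserve ring buffer space,
 *	   write the fields, commit, run _code_post, and finally restore
 *	   the per-CPU dynamic length stack offset.
 */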

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>

#undef _ctf_integer_ext_fetched
#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
	{ \
		_type __tmp = _src; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp)); \
		__chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp)); \
	}

#undef _ctf_integer_ext_isuser0
#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
	_ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)

#undef _ctf_integer_ext_isuser1
#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
{ \
	union { \
		char __array[sizeof(_user_src)]; \
		__typeof__(_user_src) __v; \
	} __tmp_fetch; \
	if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
				&(_user_src), sizeof(_user_src))) \
		memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
	_ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
}

#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
	_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	if (_user) { \
		__chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
	} else { \
		__chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
	}

#if (__BYTE_ORDER == __LITTLE_ENDIAN)
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	if (_user) { \
		__chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
	} else { \
		__chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
	}
#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
/*
 * For big endian, we need to byteswap into little endian.
 */
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	{ \
		size_t _i; \
		\
		for (_i = 0; _i < (_length); _i++) { \
			_type _tmp; \
			\
			if (_user) { \
				if (get_user(_tmp, (_type *) _src + _i)) \
					_tmp = 0; \
			} else { \
				_tmp = ((_type *) _src)[_i]; \
			} \
			switch (sizeof(_type)) { \
			case 1: \
				break; \
			case 2: \
				_tmp = cpu_to_le16(_tmp); \
				break; \
			case 4: \
				_tmp = cpu_to_le32(_tmp); \
				break; \
			case 8: \
				_tmp = cpu_to_le64(_tmp); \
				break; \
			default: \
				BUG_ON(1); \
			} \
			__chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
		} \
	}
#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */

#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	{ \
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx]; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type)); \
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type)); \
	} \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	if (_user) { \
		__chan->ops->event_write_from_user(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	} else { \
		__chan->ops->event_write(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	}

#if (__BYTE_ORDER == __LITTLE_ENDIAN)
#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src, \
			_length_type, _src_length, \
			_user, _nowrite) \
	{ \
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type)); \
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type)); \
	} \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	if (_user) { \
		__chan->ops->event_write_from_user(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	} else { \
		__chan->ops->event_write(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	}
#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
/*
 * For big endian, we need to byteswap into little endian.
 */
#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src, \
			_length_type, _src_length, \
			_user, _nowrite) \
	{ \
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type)); \
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type)); \
	} \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	{ \
		size_t _i, _length; \
		\
		_length = __get_dynamic_len(dest); \
		for (_i = 0; _i < _length; _i++) { \
			_type _tmp; \
			\
			if (_user) { \
				if (get_user(_tmp, (_type *) _src + _i)) \
					_tmp = 0; \
			} else { \
				_tmp = ((_type *) _src)[_i]; \
			} \
			switch (sizeof(_type)) { \
			case 1: \
				break; \
			case 2: \
				_tmp = cpu_to_le16(_tmp); \
				break; \
			case 4: \
				_tmp = cpu_to_le32(_tmp); \
				break; \
			case 8: \
				_tmp = cpu_to_le64(_tmp); \
				break; \
			default: \
				BUG_ON(1); \
			} \
			__chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
		} \
	}
#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */

#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite) \
	if (_user) { \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \
		__chan->ops->event_strcpy_from_user(&__ctx, _src, \
			__get_dynamic_len(dest)); \
	} else { \
		const char *__ctf_tmp_string = \
			((_src) ? (_src) : __LTTNG_NULL_STRING); \
		lib_ring_buffer_align_ctx(&__ctx, \
			lttng_alignof(*__ctf_tmp_string)); \
		__chan->ops->event_strcpy(&__ctx, __ctf_tmp_string, \
			__get_dynamic_len(dest)); \
	}

#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

#undef ctf_align
#define ctf_align(_type) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));

#undef ctf_custom_field
#define ctf_custom_field(_type, _item, _code)	_code

#undef ctf_custom_code
#define ctf_custom_code(...) \
	{ \
		__VA_ARGS__ \
	}

/* Beware: this length getter actually consumes (pops) the len value. */
#undef __get_dynamic_len
#define __get_dynamic_len(field)	this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx++]

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

#undef TP_code_pre
#define TP_code_pre(...)	__VA_ARGS__

#undef TP_code_post
#define TP_code_post(...)	__VA_ARGS__

/*
 * For state dump, check that "session" argument (mandatory) matches the
 * session this event belongs to. Ensures that we write state dump data only
 * into the started session, not into all sessions.
 */
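
/*
 * Usage sketch (assumption, mirroring the state dump probes): the probe
 * source defines TP_SESSION_CHECK before including the instrumentation
 * headers, and the tracepoint prototype carries a "session" argument for
 * the generated probe to compare against its own session:
 *
 *	#define TP_SESSION_CHECK
 *	#include <the instrumentation headers>
 */
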
#ifdef TP_SESSION_CHECK
#define _TP_SESSION_CHECK(session, csession)	(session == csession)
#else /* TP_SESSION_CHECK */
#define _TP_SESSION_CHECK(session, csession)	1
#endif /* TP_SESSION_CHECK */

/*
 * We use twice the size for the filter stack data, to hold a size and a
 * pointer for each field (worst case). For integers, the maximum size
 * required is 64 bits; same for double-precision floats. Both fit within
 * 2*sizeof(unsigned long) on all supported architectures.
 * Perform a UNION (||) over the filter runtime list.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data, _proto) \
{ \
	struct probe_local_vars { _locvar }; \
	struct lttng_event *__event = __data; \
	struct lttng_probe_ctx __lttng_probe_ctx = { \
		.event = __event, \
		.interruptible = !irqs_disabled(), \
	}; \
	struct lttng_channel *__chan = __event->chan; \
	struct lttng_session *__session = __chan->session; \
	struct lib_ring_buffer_ctx __ctx; \
	ssize_t __event_len; \
	size_t __event_align; \
	size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
	union { \
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar; \
	int __ret; \
	struct probe_local_vars __tp_locvar; \
	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
			&__tp_locvar; \
	struct lttng_pid_tracker *__lpf; \
	\
	if (!_TP_SESSION_CHECK(session, __session)) \
		return; \
	if (unlikely(!READ_ONCE(__session->active))) \
		return; \
	if (unlikely(!READ_ONCE(__chan->enabled))) \
		return; \
	if (unlikely(!READ_ONCE(__event->enabled))) \
		return; \
	__lpf = lttng_rcu_dereference(__session->pid_tracker); \
	if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->tgid))) \
		return; \
	__orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
	__dynamic_len_idx = __orig_dynamic_len_offset; \
	_code_pre \
	if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime; \
		int __filter_record = __event->has_enablers_without_bytecode; \
		\
		__event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
				tp_locvar, _args); \
		lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
				__filter_record = 1; \
		} \
		if (likely(!__filter_record)) \
			goto __post; \
	} \
	__event_len = __event_get_size__##_name(tp_locvar, _args); \
	if (unlikely(__event_len < 0)) { \
		lib_ring_buffer_lost_event_too_big(__chan->chan); \
		goto __post; \
	} \
	__event_align = __event_get_align__##_name(tp_locvar, _args); \
	lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
			__event_align, -1); \
	__ret = __chan->ops->event_reserve(&__ctx, __event->id); \
	if (__ret < 0) \
		goto __post; \
	_fields \
	__chan->ops->event_commit(&__ctx); \
__post: \
	_code_post \
	barrier();	/* use before un-reserve. */ \
	this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
	return; \
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data) \
{ \
	struct probe_local_vars { _locvar }; \
	struct lttng_event *__event = __data; \
	struct lttng_probe_ctx __lttng_probe_ctx = { \
		.event = __event, \
		.interruptible = !irqs_disabled(), \
	}; \
	struct lttng_channel *__chan = __event->chan; \
	struct lttng_session *__session = __chan->session; \
	struct lib_ring_buffer_ctx __ctx; \
	ssize_t __event_len; \
	size_t __event_align; \
	size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
	union { \
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar; \
	int __ret; \
	struct probe_local_vars __tp_locvar; \
	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
			&__tp_locvar; \
	struct lttng_pid_tracker *__lpf; \
	\
	if (!_TP_SESSION_CHECK(session, __session)) \
		return; \
	if (unlikely(!READ_ONCE(__session->active))) \
		return; \
	if (unlikely(!READ_ONCE(__chan->enabled))) \
		return; \
	if (unlikely(!READ_ONCE(__event->enabled))) \
		return; \
	__lpf = lttng_rcu_dereference(__session->pid_tracker); \
	if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
		return; \
	__orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
	__dynamic_len_idx = __orig_dynamic_len_offset; \
	_code_pre \
	if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime; \
		int __filter_record = __event->has_enablers_without_bytecode; \
		\
		__event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
				tp_locvar); \
		lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
				__filter_record = 1; \
		} \
		if (likely(!__filter_record)) \
			goto __post; \
	} \
	__event_len = __event_get_size__##_name(tp_locvar); \
	if (unlikely(__event_len < 0)) { \
		lib_ring_buffer_lost_event_too_big(__chan->chan); \
		goto __post; \
	} \
	__event_align = __event_get_align__##_name(tp_locvar); \
	lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
			__event_align, -1); \
	__ret = __chan->ops->event_reserve(&__ctx, __event->id); \
	if (__ret < 0) \
		goto __post; \
	_fields \
	__chan->ops->event_commit(&__ctx); \
__post: \
	_code_post \
	barrier();	/* use before un-reserve. */ \
	this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
	return; \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __get_dynamic_len

/*
 * Stage 7 of the trace events.
 *
 * Create event descriptions.
 */

/* Named field types must be defined in lttng-types.h */

#include <probes/lttng-events-reset.h>	/* Reset all macros within LTTNG_TRACEPOINT_EVENT */

#ifndef TP_PROBE_CB
#define TP_PROBE_CB(_template)	&__event_probe__##_template
#endif

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
static const struct lttng_event_desc __event_desc___##_map = { \
	.fields = __event_fields___##_template, \
	.name = #_map, \
	.kname = #_name, \
	.probe_callback = (void *) TP_PROBE_CB(_template), \
	.nr_fields = ARRAY_SIZE(__event_fields___##_template), \
	.owner = THIS_MODULE, \
};

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 8 of the trace events.
 *
 * Create an array of event description pointers.
 */

#include <probes/lttng-events-reset.h>	/* Reset all macros within LTTNG_TRACEPOINT_EVENT */

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
	&__event_desc___##_map,

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)

#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)

static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
};

#undef TP_ID1
#undef TP_ID

/*
 * Stage 9 of the trace events.
 *
 * Create a toplevel descriptor for the whole probe.
 */

#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)

/* non-const because list head will be modified when registered. */
static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
	.provider = __stringify(TRACE_SYSTEM),
	.event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
	.nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
	.head = { NULL, NULL },
	.lazy_init_head = { NULL, NULL },
	.lazy = 0,
};

#undef TP_ID1
#undef TP_ID

/*
 * Stage 10 of the trace events.
 *
 * Register/unregister probes at module load/unload.
 */
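
/*
 * Probe modules needing to control registration themselves can define
 * TP_MODULE_NOINIT to skip generating the init/exit functions below
 * entirely, or TP_MODULE_NOAUTOLOAD to generate them without wiring them
 * into module_init()/module_exit(). Sketch of the latter (hypothetical
 * probe source):
 *
 *	#define TP_MODULE_NOAUTOLOAD
 *	#include <the instrumentation headers>
 *
 * and then call __lttng_events_init__<TRACE_SYSTEM>() manually.
 */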

#include <probes/lttng-events-reset.h>	/* Reset all macros within LTTNG_TRACEPOINT_EVENT */

#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)
#define module_init_eval1(_token, _system)	module_init(_token##_system)
#define module_init_eval(_token, _system)	module_init_eval1(_token, _system)
#define module_exit_eval1(_token, _system)	module_exit(_token##_system)
#define module_exit_eval(_token, _system)	module_exit_eval1(_token, _system)

#ifndef TP_MODULE_NOINIT
static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
{
	wrapper_vmalloc_sync_all();
	return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}

static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
{
	lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}

#ifndef TP_MODULE_NOAUTOLOAD
module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
#endif

#endif

#undef module_init_eval
#undef module_exit_eval
#undef TP_ID1
#undef TP_ID

#undef TP_PROTO
#undef TP_ARGS