// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
6 #include <side/trace.h>
/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif
28 struct side_events_register_handle
{
29 struct side_list_node node
;
30 struct side_event_description
**events
;
34 struct side_tracer_handle
{
35 struct side_list_node node
;
36 void (*cb
)(enum side_tracer_notification notif
,
37 struct side_event_description
**events
, uint32_t nr_events
, void *priv
);
/* Library-wide RCU grace-period state protecting callback arrays. */
static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;

/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

/* All registered event batches and tracers, protected by side_lock. */
static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback = { };

/* Automatic library setup/teardown at load/unload time. */
void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));
69 void side_call(const struct side_event_description
*desc
, const struct side_arg_vec
*side_arg_vec
)
71 const struct side_callback
*side_cb
;
72 unsigned int rcu_period
;
75 if (side_unlikely(finalized
))
77 if (side_unlikely(!initialized
))
79 if (side_unlikely(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)) {
80 printf("ERROR: unexpected variadic event description\n");
83 enabled
= __atomic_load_n(desc
->enabled
, __ATOMIC_RELAXED
);
84 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK
)) {
85 // TODO: call kernel write.
87 rcu_period
= side_rcu_read_begin(&rcu_gp
);
88 for (side_cb
= side_rcu_dereference(desc
->callbacks
); side_cb
->u
.call
!= NULL
; side_cb
++)
89 side_cb
->u
.call(desc
, side_arg_vec
, side_cb
->priv
);
90 side_rcu_read_end(&rcu_gp
, rcu_period
);
93 void side_call_variadic(const struct side_event_description
*desc
,
94 const struct side_arg_vec
*side_arg_vec
,
95 const struct side_arg_dynamic_struct
*var_struct
)
97 const struct side_callback
*side_cb
;
98 unsigned int rcu_period
;
101 if (side_unlikely(finalized
))
103 if (side_unlikely(!initialized
))
105 if (side_unlikely(!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))) {
106 printf("ERROR: unexpected non-variadic event description\n");
109 enabled
= __atomic_load_n(desc
->enabled
, __ATOMIC_RELAXED
);
110 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK
)) {
111 // TODO: call kernel write.
113 rcu_period
= side_rcu_read_begin(&rcu_gp
);
114 for (side_cb
= side_rcu_dereference(desc
->callbacks
); side_cb
->u
.call_variadic
!= NULL
; side_cb
++)
115 side_cb
->u
.call_variadic(desc
, side_arg_vec
, var_struct
, side_cb
->priv
);
116 side_rcu_read_end(&rcu_gp
, rcu_period
);
120 const struct side_callback
*side_tracer_callback_lookup(
121 const struct side_event_description
*desc
,
122 void (*call
)(), void *priv
)
124 const struct side_callback
*cb
;
126 for (cb
= desc
->callbacks
; cb
->u
.call
!= NULL
; cb
++) {
127 if (cb
->u
.call
== call
&& cb
->priv
== priv
)
134 int _side_tracer_callback_register(struct side_event_description
*desc
,
135 void *call
, void *priv
)
137 struct side_callback
*old_cb
, *new_cb
;
138 int ret
= SIDE_ERROR_OK
;
142 return SIDE_ERROR_INVAL
;
144 return SIDE_ERROR_EXITING
;
147 pthread_mutex_lock(&side_lock
);
148 old_nr_cb
= desc
->nr_callbacks
;
149 if (old_nr_cb
== UINT32_MAX
) {
150 ret
= SIDE_ERROR_INVAL
;
153 /* Reject duplicate (call, priv) tuples. */
154 if (side_tracer_callback_lookup(desc
, call
, priv
)) {
155 ret
= SIDE_ERROR_EXIST
;
158 old_cb
= (struct side_callback
*) desc
->callbacks
;
159 /* old_nr_cb + 1 (new cb) + 1 (NULL) */
160 new_cb
= (struct side_callback
*) calloc(old_nr_cb
+ 2, sizeof(struct side_callback
));
162 ret
= SIDE_ERROR_NOMEM
;
165 memcpy(new_cb
, old_cb
, old_nr_cb
);
166 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
167 new_cb
[old_nr_cb
].u
.call_variadic
= call
;
169 new_cb
[old_nr_cb
].u
.call
= call
;
170 new_cb
[old_nr_cb
].priv
= priv
;
171 side_rcu_assign_pointer(desc
->callbacks
, new_cb
);
172 side_rcu_wait_grace_period(&rcu_gp
);
175 desc
->nr_callbacks
++;
176 /* Increment concurrently with kernel setting the top bits. */
178 (void) __atomic_add_fetch(desc
->enabled
, 1, __ATOMIC_RELAXED
);
180 pthread_mutex_unlock(&side_lock
);
184 int side_tracer_callback_register(struct side_event_description
*desc
,
185 side_tracer_callback_func call
,
188 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
189 return SIDE_ERROR_INVAL
;
190 return _side_tracer_callback_register(desc
, call
, priv
);
193 int side_tracer_callback_variadic_register(struct side_event_description
*desc
,
194 side_tracer_callback_variadic_func call_variadic
,
197 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
198 return SIDE_ERROR_INVAL
;
199 return _side_tracer_callback_register(desc
, call_variadic
, priv
);
202 int _side_tracer_callback_unregister(struct side_event_description
*desc
,
203 void *call
, void *priv
)
205 struct side_callback
*old_cb
, *new_cb
;
206 const struct side_callback
*cb_pos
;
208 int ret
= SIDE_ERROR_OK
;
212 return SIDE_ERROR_INVAL
;
214 return SIDE_ERROR_EXITING
;
217 pthread_mutex_lock(&side_lock
);
218 cb_pos
= side_tracer_callback_lookup(desc
, call
, priv
);
220 ret
= SIDE_ERROR_NOENT
;
223 old_nr_cb
= desc
->nr_callbacks
;
224 old_cb
= (struct side_callback
*) desc
->callbacks
;
225 if (old_nr_cb
== 1) {
226 new_cb
= (struct side_callback
*) &side_empty_callback
;
228 pos_idx
= cb_pos
- desc
->callbacks
;
229 /* Remove entry at pos_idx. */
230 /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
231 new_cb
= (struct side_callback
*) calloc(old_nr_cb
, sizeof(struct side_callback
));
233 ret
= SIDE_ERROR_NOMEM
;
236 memcpy(new_cb
, old_cb
, pos_idx
);
237 memcpy(&new_cb
[pos_idx
], &old_cb
[pos_idx
+ 1], old_nr_cb
- pos_idx
- 1);
239 side_rcu_assign_pointer(desc
->callbacks
, new_cb
);
240 side_rcu_wait_grace_period(&rcu_gp
);
242 desc
->nr_callbacks
--;
243 /* Decrement concurrently with kernel setting the top bits. */
245 (void) __atomic_add_fetch(desc
->enabled
, -1, __ATOMIC_RELAXED
);
247 pthread_mutex_unlock(&side_lock
);
251 int side_tracer_callback_unregister(struct side_event_description
*desc
,
252 side_tracer_callback_func call
,
255 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
256 return SIDE_ERROR_INVAL
;
257 return _side_tracer_callback_unregister(desc
, call
, priv
);
260 int side_tracer_callback_variadic_unregister(struct side_event_description
*desc
,
261 side_tracer_callback_variadic_func call_variadic
,
264 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
265 return SIDE_ERROR_INVAL
;
266 return _side_tracer_callback_unregister(desc
, call_variadic
, priv
);
269 struct side_events_register_handle
*side_events_register(struct side_event_description
**events
, uint32_t nr_events
)
271 struct side_events_register_handle
*events_handle
= NULL
;
272 struct side_tracer_handle
*tracer_handle
;
278 events_handle
= calloc(1, sizeof(struct side_events_register_handle
));
281 events_handle
->events
= events
;
282 events_handle
->nr_events
= nr_events
;
284 pthread_mutex_lock(&side_lock
);
285 side_list_insert_node_tail(&side_events_list
, &events_handle
->node
);
286 side_list_for_each_entry(tracer_handle
, &side_tracer_list
, node
) {
287 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS
,
288 events
, nr_events
, tracer_handle
->priv
);
290 pthread_mutex_unlock(&side_lock
);
291 //TODO: call event batch register ioctl
292 return events_handle
;
296 void side_event_remove_callbacks(struct side_event_description
*desc
)
298 uint32_t nr_cb
= desc
->nr_callbacks
;
299 struct side_callback
*old_cb
;
303 old_cb
= (struct side_callback
*) desc
->callbacks
;
304 (void) __atomic_add_fetch(desc
->enabled
, -1, __ATOMIC_RELAXED
);
306 * Setting the state back to 0 cb and empty callbacks out of
307 * caution. This should not matter because instrumentation is
310 desc
->nr_callbacks
= 0;
311 side_rcu_assign_pointer(desc
->callbacks
, &side_empty_callback
);
313 * No need to wait for grace period because instrumentation is
320 * Unregister event handle. At this point, all side events in that
321 * handle should be unreachable.
323 void side_events_unregister(struct side_events_register_handle
*events_handle
)
325 struct side_tracer_handle
*tracer_handle
;
334 pthread_mutex_lock(&side_lock
);
335 side_list_remove_node(&events_handle
->node
);
336 side_list_for_each_entry(tracer_handle
, &side_tracer_list
, node
) {
337 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS
,
338 events_handle
->events
, events_handle
->nr_events
,
339 tracer_handle
->priv
);
341 for (i
= 0; i
< events_handle
->nr_events
; i
++) {
342 struct side_event_description
*event
= events_handle
->events
[i
];
344 /* Skip NULL pointers */
347 side_event_remove_callbacks(event
);
349 pthread_mutex_unlock(&side_lock
);
350 //TODO: call event batch unregister ioctl
354 struct side_tracer_handle
*side_tracer_event_notification_register(
355 void (*cb
)(enum side_tracer_notification notif
,
356 struct side_event_description
**events
, uint32_t nr_events
, void *priv
),
359 struct side_tracer_handle
*tracer_handle
;
360 struct side_events_register_handle
*events_handle
;
366 tracer_handle
= calloc(1, sizeof(struct side_tracer_handle
));
369 pthread_mutex_lock(&side_lock
);
370 tracer_handle
->cb
= cb
;
371 tracer_handle
->priv
= priv
;
372 side_list_insert_node_tail(&side_tracer_list
, &tracer_handle
->node
);
373 side_list_for_each_entry(events_handle
, &side_events_list
, node
) {
374 cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS
,
375 events_handle
->events
, events_handle
->nr_events
, priv
);
377 pthread_mutex_unlock(&side_lock
);
378 return tracer_handle
;
381 void side_tracer_event_notification_unregister(struct side_tracer_handle
*tracer_handle
)
383 struct side_events_register_handle
*events_handle
;
389 pthread_mutex_lock(&side_lock
);
390 side_list_for_each_entry(events_handle
, &side_events_list
, node
) {
391 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS
,
392 events_handle
->events
, events_handle
->nr_events
,
393 tracer_handle
->priv
);
395 side_list_remove_node(&tracer_handle
->node
);
396 pthread_mutex_unlock(&side_lock
);
403 side_rcu_gp_init(&rcu_gp
);
408 * side_exit() is executed from a library destructor. It can be called
409 * explicitly at application exit as well. Concurrent side API use is
410 * not expected at that point.
414 struct side_events_register_handle
*handle
, *tmp
;
418 side_list_for_each_entry_safe(handle
, tmp
, &side_events_list
, node
)
419 side_events_unregister(handle
);
420 side_rcu_gp_exit(&rcu_gp
);