// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>

/* Standard headers used below; RCU, list and tracer helpers are assumed
 * to be provided by project-internal headers not shown here. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Top 8 bits reserved for kernel tracer use. */
#define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000
#define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000

/* Allow 2^24 tracer callbacks to be registered on an event. */
#define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFF
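
/*
 * Layout of the per-event 32-bit "enabled" word implied by the masks
 * above (illustrative summary, not an additional API):
 *
 *   bit  31    : kernel user-event enable (SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)
 *   bits 24-30 : remaining kernel-reserved bits (SIDE_EVENT_ENABLED_KERNEL_MASK)
 *   bits  0-23 : number of registered user-space callbacks (SIDE_EVENT_ENABLED_USER_MASK)
 *
 * For example, an enabled value of 0x80000002 would mean the kernel
 * user-event bit is set and two user-space callbacks are registered.
 */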

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		printf("ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call(desc, sav_desc, NULL);

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec_description *sav_desc,
	const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		printf("ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call_variadic(desc, sav_desc, var_struct, NULL);

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}
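
/*
 * Callback arrays are updated with a copy-update scheme: the array
 * published through desc->callbacks is never modified in place. The
 * register/unregister paths below allocate a new NULL-terminated array,
 * publish it with side_rcu_assign_pointer(), wait for a grace period so
 * that no side_call()/side_call_variadic() reader can still observe the
 * old array, and only then free it.
 */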

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	/* Copy the existing callbacks (size in bytes, not in entries). */
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	/* Increment concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}
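
/*
 * Illustrative sketch (not part of this file's API surface): a tracer
 * that wants to receive a non-variadic event would typically register a
 * callback matching the signature accepted above. The names my_tracer_cb
 * and my_tracer_state below are hypothetical.
 *
 *	static void my_tracer_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec_description *sav_desc,
 *			void *priv)
 *	{
 *		struct my_tracer_state *state = priv;
 *		// Consume the static argument vector described by sav_desc.
 *	}
 *
 *	ret = side_tracer_callback_register(desc, my_tracer_cb, &state);
 *	...
 *	ret = side_tracer_callback_unregister(desc, my_tracer_cb, &state);
 */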

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		/* Copy entries before and after pos_idx (sizes in bytes). */
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	/* Decrement concurrently with kernel setting the top bits. */
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	(void) __atomic_add_fetch(desc->enabled, -nr_cb, __ATOMIC_RELAXED);
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}
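
/*
 * Illustrative sketch (hypothetical names): an instrumented component
 * typically registers its array of event descriptions once at load time
 * and unregisters it before unloading, e.g.:
 *
 *	static struct side_event_description *my_events[] = { ... };
 *
 *	handle = side_events_register(my_events,
 *			sizeof(my_events) / sizeof(my_events[0]));
 *	...
 *	side_events_unregister(handle);
 *
 * Registration notifies every tracer already registered through
 * side_tracer_event_notification_register() below.
 */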

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}
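
/*
 * Illustrative sketch (hypothetical names): a tracer can learn about
 * every registered event, past and future, by registering a
 * notification callback:
 *
 *	static void my_notif_cb(enum side_tracer_notification notif,
 *			struct side_event_description **events,
 *			uint32_t nr_events, void *priv)
 *	{
 *		// Attach or detach per-event callbacks depending on notif.
 *	}
 *
 *	tracer_handle = side_tracer_event_notification_register(my_notif_cb, NULL);
 *	...
 *	side_tracer_event_notification_unregister(tracer_handle);
 */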

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_rcu_gp_exit(&rcu_gp);
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	finalized = true;
}