1 // SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <side/trace.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
12 /* Top 8 bits reserved for kernel tracer use. */
13 #if SIDE_BITS_PER_LONG == 64
14 # define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF00000000000000ULL
15 # define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x8000000000000000ULL
17 /* Allow 2^56 tracer references on an event. */
18 # define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFFFFFFFFFFULL
20 # define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF000000UL
21 # define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x80000000UL
23 /* Allow 2^24 tracer references on an event. */
24 # define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFFUL
27 struct side_events_register_handle
{
28 struct side_list_node node
;
29 struct side_event_description
**events
;
33 struct side_tracer_handle
{
34 struct side_list_node node
;
35 void (*cb
)(enum side_tracer_notification notif
,
36 struct side_event_description
**events
, uint32_t nr_events
, void *priv
);
40 static struct side_rcu_gp_state rcu_gp
;
43 * Lazy initialization for early use within library constructors.
45 static bool initialized
;
47 * Do not register/unregister any more events after destructor.
49 static bool finalized
;
52 * Recursive mutex to allow tracer callbacks to use the side API.
54 static pthread_mutex_t side_lock
= PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
;
56 static DEFINE_SIDE_LIST_HEAD(side_events_list
);
57 static DEFINE_SIDE_LIST_HEAD(side_tracer_list
);
60 * The empty callback has a NULL function callback pointer, which stops
61 * iteration on the array of callbacks immediately.
63 const struct side_callback side_empty_callback
= { };
65 void side_call(const struct side_event_description
*desc
, const struct side_arg_vec
*side_arg_vec
)
67 struct side_event_state
*event_state
;
68 struct side_rcu_read_state rcu_read_state
;
69 const struct side_callback
*side_cb
;
72 if (side_unlikely(finalized
))
74 if (side_unlikely(!initialized
))
76 if (side_unlikely(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)) {
77 printf("ERROR: unexpected variadic event description\n");
80 event_state
= side_ptr_get(desc
->state
);
81 enabled
= __atomic_load_n(&event_state
->enabled
, __ATOMIC_RELAXED
);
82 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK
)) {
83 // TODO: call kernel write.
85 side_rcu_read_begin(&rcu_gp
, &rcu_read_state
);
86 for (side_cb
= side_rcu_dereference(event_state
->callbacks
); side_cb
->u
.call
!= NULL
; side_cb
++)
87 side_cb
->u
.call(desc
, side_arg_vec
, side_cb
->priv
);
88 side_rcu_read_end(&rcu_gp
, &rcu_read_state
);
91 void side_call_variadic(const struct side_event_description
*desc
,
92 const struct side_arg_vec
*side_arg_vec
,
93 const struct side_arg_dynamic_struct
*var_struct
)
95 struct side_event_state
*event_state
;
96 struct side_rcu_read_state rcu_read_state
;
97 const struct side_callback
*side_cb
;
100 if (side_unlikely(finalized
))
102 if (side_unlikely(!initialized
))
104 if (side_unlikely(!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))) {
105 printf("ERROR: unexpected non-variadic event description\n");
108 event_state
= side_ptr_get(desc
->state
);
109 enabled
= __atomic_load_n(&event_state
->enabled
, __ATOMIC_RELAXED
);
110 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK
)) {
111 // TODO: call kernel write.
113 side_rcu_read_begin(&rcu_gp
, &rcu_read_state
);
114 for (side_cb
= side_rcu_dereference(event_state
->callbacks
); side_cb
->u
.call_variadic
!= NULL
; side_cb
++)
115 side_cb
->u
.call_variadic(desc
, side_arg_vec
, var_struct
, side_cb
->priv
);
116 side_rcu_read_end(&rcu_gp
, &rcu_read_state
);
120 const struct side_callback
*side_tracer_callback_lookup(
121 const struct side_event_description
*desc
,
122 void *call
, void *priv
)
124 struct side_event_state
*event_state
= side_ptr_get(desc
->state
);
125 const struct side_callback
*cb
;
127 for (cb
= event_state
->callbacks
; cb
->u
.call
!= NULL
; cb
++) {
128 if ((void *) cb
->u
.call
== call
&& cb
->priv
== priv
)
135 int _side_tracer_callback_register(struct side_event_description
*desc
,
136 void *call
, void *priv
)
138 struct side_event_state
*event_state
;
139 struct side_callback
*old_cb
, *new_cb
;
140 int ret
= SIDE_ERROR_OK
;
144 return SIDE_ERROR_INVAL
;
146 return SIDE_ERROR_EXITING
;
149 pthread_mutex_lock(&side_lock
);
150 event_state
= side_ptr_get(desc
->state
);
151 old_nr_cb
= desc
->nr_callbacks
;
152 if (old_nr_cb
== UINT32_MAX
) {
153 ret
= SIDE_ERROR_INVAL
;
156 /* Reject duplicate (call, priv) tuples. */
157 if (side_tracer_callback_lookup(desc
, call
, priv
)) {
158 ret
= SIDE_ERROR_EXIST
;
161 old_cb
= (struct side_callback
*) event_state
->callbacks
;
162 /* old_nr_cb + 1 (new cb) + 1 (NULL) */
163 new_cb
= (struct side_callback
*) calloc(old_nr_cb
+ 2, sizeof(struct side_callback
));
165 ret
= SIDE_ERROR_NOMEM
;
168 memcpy(new_cb
, old_cb
, old_nr_cb
);
169 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
170 new_cb
[old_nr_cb
].u
.call_variadic
=
171 (side_tracer_callback_variadic_func
) call
;
173 new_cb
[old_nr_cb
].u
.call
=
174 (side_tracer_callback_func
) call
;
175 new_cb
[old_nr_cb
].priv
= priv
;
176 side_rcu_assign_pointer(event_state
->callbacks
, new_cb
);
177 side_rcu_wait_grace_period(&rcu_gp
);
180 desc
->nr_callbacks
++;
181 /* Increment concurrently with kernel setting the top bits. */
183 (void) __atomic_add_fetch(&event_state
->enabled
, 1, __ATOMIC_RELAXED
);
185 pthread_mutex_unlock(&side_lock
);
189 int side_tracer_callback_register(struct side_event_description
*desc
,
190 side_tracer_callback_func call
,
193 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
194 return SIDE_ERROR_INVAL
;
195 return _side_tracer_callback_register(desc
, (void *) call
, priv
);
198 int side_tracer_callback_variadic_register(struct side_event_description
*desc
,
199 side_tracer_callback_variadic_func call_variadic
,
202 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
203 return SIDE_ERROR_INVAL
;
204 return _side_tracer_callback_register(desc
, (void *) call_variadic
, priv
);
207 static int _side_tracer_callback_unregister(struct side_event_description
*desc
,
208 void *call
, void *priv
)
210 struct side_event_state
*event_state
;
211 struct side_callback
*old_cb
, *new_cb
;
212 const struct side_callback
*cb_pos
;
214 int ret
= SIDE_ERROR_OK
;
218 return SIDE_ERROR_INVAL
;
220 return SIDE_ERROR_EXITING
;
223 pthread_mutex_lock(&side_lock
);
224 event_state
= side_ptr_get(desc
->state
);
225 cb_pos
= side_tracer_callback_lookup(desc
, call
, priv
);
227 ret
= SIDE_ERROR_NOENT
;
230 old_nr_cb
= desc
->nr_callbacks
;
231 old_cb
= (struct side_callback
*) event_state
->callbacks
;
232 if (old_nr_cb
== 1) {
233 new_cb
= (struct side_callback
*) &side_empty_callback
;
235 pos_idx
= cb_pos
- event_state
->callbacks
;
236 /* Remove entry at pos_idx. */
237 /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
238 new_cb
= (struct side_callback
*) calloc(old_nr_cb
, sizeof(struct side_callback
));
240 ret
= SIDE_ERROR_NOMEM
;
243 memcpy(new_cb
, old_cb
, pos_idx
);
244 memcpy(&new_cb
[pos_idx
], &old_cb
[pos_idx
+ 1], old_nr_cb
- pos_idx
- 1);
246 side_rcu_assign_pointer(event_state
->callbacks
, new_cb
);
247 side_rcu_wait_grace_period(&rcu_gp
);
249 desc
->nr_callbacks
--;
250 /* Decrement concurrently with kernel setting the top bits. */
252 (void) __atomic_add_fetch(&event_state
->enabled
, -1, __ATOMIC_RELAXED
);
254 pthread_mutex_unlock(&side_lock
);
258 int side_tracer_callback_unregister(struct side_event_description
*desc
,
259 side_tracer_callback_func call
,
262 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
263 return SIDE_ERROR_INVAL
;
264 return _side_tracer_callback_unregister(desc
, (void *) call
, priv
);
267 int side_tracer_callback_variadic_unregister(struct side_event_description
*desc
,
268 side_tracer_callback_variadic_func call_variadic
,
271 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
272 return SIDE_ERROR_INVAL
;
273 return _side_tracer_callback_unregister(desc
, (void *) call_variadic
, priv
);
276 struct side_events_register_handle
*side_events_register(struct side_event_description
**events
, uint32_t nr_events
)
278 struct side_events_register_handle
*events_handle
= NULL
;
279 struct side_tracer_handle
*tracer_handle
;
285 events_handle
= (struct side_events_register_handle
*)
286 calloc(1, sizeof(struct side_events_register_handle
));
289 events_handle
->events
= events
;
290 events_handle
->nr_events
= nr_events
;
292 pthread_mutex_lock(&side_lock
);
293 side_list_insert_node_tail(&side_events_list
, &events_handle
->node
);
294 side_list_for_each_entry(tracer_handle
, &side_tracer_list
, node
) {
295 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS
,
296 events
, nr_events
, tracer_handle
->priv
);
298 pthread_mutex_unlock(&side_lock
);
299 //TODO: call event batch register ioctl
300 return events_handle
;
304 void side_event_remove_callbacks(struct side_event_description
*desc
)
306 struct side_event_state
*event_state
= side_ptr_get(desc
->state
);
307 uint32_t nr_cb
= desc
->nr_callbacks
;
308 struct side_callback
*old_cb
;
312 old_cb
= (struct side_callback
*) event_state
->callbacks
;
313 (void) __atomic_add_fetch(&event_state
->enabled
, -1, __ATOMIC_RELAXED
);
315 * Setting the state back to 0 cb and empty callbacks out of
316 * caution. This should not matter because instrumentation is
319 desc
->nr_callbacks
= 0;
320 side_rcu_assign_pointer(event_state
->callbacks
, &side_empty_callback
);
322 * No need to wait for grace period because instrumentation is
329 * Unregister event handle. At this point, all side events in that
330 * handle should be unreachable.
332 void side_events_unregister(struct side_events_register_handle
*events_handle
)
334 struct side_tracer_handle
*tracer_handle
;
343 pthread_mutex_lock(&side_lock
);
344 side_list_remove_node(&events_handle
->node
);
345 side_list_for_each_entry(tracer_handle
, &side_tracer_list
, node
) {
346 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS
,
347 events_handle
->events
, events_handle
->nr_events
,
348 tracer_handle
->priv
);
350 for (i
= 0; i
< events_handle
->nr_events
; i
++) {
351 struct side_event_description
*event
= events_handle
->events
[i
];
353 /* Skip NULL pointers */
356 side_event_remove_callbacks(event
);
358 pthread_mutex_unlock(&side_lock
);
359 //TODO: call event batch unregister ioctl
363 struct side_tracer_handle
*side_tracer_event_notification_register(
364 void (*cb
)(enum side_tracer_notification notif
,
365 struct side_event_description
**events
, uint32_t nr_events
, void *priv
),
368 struct side_tracer_handle
*tracer_handle
;
369 struct side_events_register_handle
*events_handle
;
375 tracer_handle
= (struct side_tracer_handle
*)
376 calloc(1, sizeof(struct side_tracer_handle
));
379 pthread_mutex_lock(&side_lock
);
380 tracer_handle
->cb
= cb
;
381 tracer_handle
->priv
= priv
;
382 side_list_insert_node_tail(&side_tracer_list
, &tracer_handle
->node
);
383 side_list_for_each_entry(events_handle
, &side_events_list
, node
) {
384 cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS
,
385 events_handle
->events
, events_handle
->nr_events
, priv
);
387 pthread_mutex_unlock(&side_lock
);
388 return tracer_handle
;
391 void side_tracer_event_notification_unregister(struct side_tracer_handle
*tracer_handle
)
393 struct side_events_register_handle
*events_handle
;
399 pthread_mutex_lock(&side_lock
);
400 side_list_for_each_entry(events_handle
, &side_events_list
, node
) {
401 tracer_handle
->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS
,
402 events_handle
->events
, events_handle
->nr_events
,
403 tracer_handle
->priv
);
405 side_list_remove_node(&tracer_handle
->node
);
406 pthread_mutex_unlock(&side_lock
);
414 side_rcu_gp_init(&rcu_gp
);
419 * side_exit() is executed from a library destructor. It can be called
420 * explicitly at application exit as well. Concurrent side API use is
421 * not expected at that point.
425 struct side_events_register_handle
*handle
, *tmp
;
429 side_list_for_each_entry_safe(handle
, tmp
, &side_events_list
, node
)
430 side_events_unregister(handle
);
431 side_rcu_gp_exit(&rcu_gp
);