// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <pthread.h>

#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif
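
/*
 * The per-event "enabled" word is shared with the kernel tracer: the
 * top 8 bits belong to the kernel, while the low bits hold a
 * user-space enable count updated under side_lock. The fast path
 * (presumably the instrumentation macros in the public headers) then
 * only needs a relaxed load to test whether the word is non-zero.
 */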

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

/*
 * Recursive mutex to allow tracer callbacks to use the side API.
 */
static pthread_mutex_t side_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback = { };
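
/*
 * A callbacks array for an event with two registered callbacks is
 * therefore { cb0, cb1, zeroed sentinel }: iteration stops at the
 * first entry whose function pointer is NULL.
 */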

void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	assert(!(event_state->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(event_state->desc, side_arg_vec, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}

void side_call_variadic(const struct side_event_state *event_state,
	const struct side_arg_vec *side_arg_vec,
	const struct side_arg_dynamic_struct *var_struct)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	assert(event_state->desc->flags & SIDE_EVENT_FLAG_VARIADIC);
	enabled = __atomic_load_n(&event_state->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	side_rcu_read_begin(&rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(event_state->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(event_state->desc, side_arg_vec, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, &rcu_read_state);
}
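
/*
 * The two fast paths above never take side_lock: they rely on the
 * updaters below publishing a whole new callbacks array and waiting
 * for an RCU grace period before freeing the old one, so the array
 * obtained through side_rcu_dereference() stays valid for the entire
 * read-side critical section.
 */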

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_callback *cb;

	for (cb = event_state->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) event_state->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(event_state->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&event_state->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}
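
/*
 * Registration never modifies a published callbacks array in place: it
 * builds a copy with the new callback appended, publishes it, waits
 * for a grace period, and only then frees the old array, once no RCU
 * reader can still reference it.
 */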

int side_tracer_callback_register(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, (void *) call_variadic, priv);
}
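
/*
 * Hypothetical usage sketch (tracer side), assuming a my_tracer_cb
 * function matching side_tracer_callback_func and an event description
 * desc obtained through the tracer notification callback:
 *
 *	if (side_tracer_callback_register(desc, my_tracer_cb, my_priv)
 *			!= SIDE_ERROR_OK)
 *		handle_error();
 *	...
 *	side_tracer_callback_unregister(desc, my_tracer_cb, my_priv);
 */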

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	event_state = side_ptr_get(desc->state);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) event_state->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - event_state->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(event_state->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}
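
/*
 * Removing the last callback switches the array back to the shared
 * side_empty_callback sentinel instead of allocating a new empty
 * array, so unregistering the last callback cannot fail with
 * SIDE_ERROR_NOMEM.
 */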

int side_tracer_callback_unregister(struct side_event_description *desc,
		side_tracer_callback_func call,
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		side_tracer_callback_variadic_func call_variadic,
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
			calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}
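
/*
 * Both registration orders are equivalent: registering events notifies
 * the tracers already present, while
 * side_tracer_event_notification_register() below replays
 * INSERT_EVENTS notifications for the events already present.
 */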

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) event_state->callbacks;
	(void) __atomic_add_fetch(&event_state->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * expected to be unreachable at this point.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(event_state->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable at this point.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
			calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&rcu_gp);
	finalized = true;
}