// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "tracer.h"
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x8000000000000000ULL

/* Allow 2^56 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000UL

/* Allow 2^24 tracer references on an event. */
# define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFFUL
#endif
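
/*
 * Example: on 64-bit, an event with two user-space callbacks
 * registered and the kernel user_event bit set has
 * *enabled == 0x8000000000000002ULL: the low 56 bits count tracer
 * references, the top 8 bits are owned by the kernel.
 */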

struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;

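/*
 * side_init() runs as a library constructor (before main() or at
 * dlopen() time); side_exit() runs as a library destructor.
 */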
void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		fprintf(stderr, "ERROR: unexpected variadic event description\n");
		abort();
	}
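	/*
	 * A single relaxed load of the enabled word gates both fast
	 * paths: the top bits are owned by the kernel tracer, the low
	 * bits count user-space callback registrations.
	 */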
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

void side_call_variadic(const struct side_event_description *desc,
		const struct side_arg_vec_description *sav_desc,
		const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		fprintf(stderr, "ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
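	/*
	 * Publish the new callback array with RCU, wait for a grace
	 * period so no reader can still reference the old array, then
	 * reclaim it.
	 */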
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}
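
/*
 * Example usage sketch (hypothetical tracer code, not part of this
 * file):
 *
 *	static void my_call(const struct side_event_description *desc,
 *			const struct side_arg_vec_description *sav_desc,
 *			void *priv)
 *	{
 *		... consume the argument vector ...
 *	}
 *
 *	ret = side_tracer_callback_register(&my_event_desc, my_call, NULL);
 *
 * Unregistration must pass the same (call, priv) tuple that was
 * registered.
 */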

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}
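
/*
 * Example lifecycle sketch ("my_events" and "nr_my_events" are
 * hypothetical names for an application's event description array):
 *
 *	struct side_events_register_handle *h;
 *
 *	h = side_events_register(my_events, nr_my_events);
 *	...
 *	side_events_unregister(h);
 */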

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
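	/*
	 * Replay all event arrays registered so far as INSERT
	 * notifications, so the new tracer observes a complete initial
	 * state.
	 */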
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
	free(tracer_handle);
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_rcu_gp_exit(&rcu_gp);
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	finalized = true;
}