Use uintptr_t for enabled state
[libside.git] / src / side.c
// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>

#include "tracer.h"
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000
#define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000

/* Allow 2^24 tracer references on an event. */
#define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFF

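/*
 * Worked example (illustrative values only): with *enabled ==
 * 0x80000003, the kernel user-event bit is set and three user-space
 * tracer callbacks are registered:
 *
 *	0x80000003 & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	-> 0x80000000 (kernel armed)
 *	0x80000003 & SIDE_EVENT_ENABLED_USER_MASK		-> 0x00000003 (3 callbacks)
 */
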
struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after the destructor has run.
 */
static bool finalized;

static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static DEFINE_SIDE_LIST_HEAD(side_events_list);
static DEFINE_SIDE_LIST_HEAD(side_tracer_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		fprintf(stderr, "ERROR: unexpected variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}
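
/*
 * Sketch of a matching tracer callback (not part of this file): the
 * signature mirrors the one side_tracer_callback_register() accepts
 * below; the name "example_cb" and the use of priv as a hit counter
 * are hypothetical.
 *
 *	static void example_cb(const struct side_event_description *desc,
 *			const struct side_arg_vec_description *sav_desc,
 *			void *priv)
 *	{
 *		(void) desc;
 *		(void) sav_desc;
 *		(*(uint64_t *) priv)++;	// count hits; a real tracer would serialize sav_desc
 *	}
 */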

void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec_description *sav_desc,
	const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		fprintf(stderr, "ERROR: unexpected non-variadic event description\n");
		abort();
	}
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}

static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void (*call)(), void *priv)
{
	const struct side_callback *cb;

	for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
		if (cb->u.call == call && cb->priv == priv)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	old_nr_cb = desc->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) desc->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic = call;
	else
		new_cb[old_nr_cb].u.call = call;
	new_cb[old_nr_cb].priv = priv;
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	desc->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

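/*
 * The update above is a copy-on-write + RCU publication pattern
 * (a summary sketch, not additional library API):
 *
 *	new = copy of old array + new slot + NULL terminator	(under side_lock)
 *	side_rcu_assign_pointer(desc->callbacks, new);		(release publish)
 *	side_rcu_wait_grace_period(&rcu_gp);			(readers quiesce)
 *	free(old);
 *
 * Readers in side_call() / side_call_variadic() only bracket their
 * traversal with side_rcu_read_begin()/side_rcu_read_end(), so they
 * never block writers and never observe a partially updated array.
 */
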
int side_tracer_callback_register(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call, priv);
}

int side_tracer_callback_variadic_register(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_register(desc, call_variadic, priv);
}

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(), void *priv)
{
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	cb_pos = side_tracer_callback_lookup(desc, call, priv);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = desc->nr_callbacks;
	old_cb = (struct side_callback *) desc->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - desc->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	side_rcu_assign_pointer(desc->callbacks, new_cb);
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cb);
	desc->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_lock);
	return ret;
}

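/*
 * Illustrative state transitions of the enabled word (example values
 * only): the low 24 bits count registered callbacks, the top bits
 * belong to the kernel tracer.
 *
 *	0x00000000  no callbacks, kernel idle
 *	0x00000001  first user callback registered (register path)
 *	0x80000001  kernel additionally arms its user-event bit
 *	0x80000000  last user callback unregistered: user tracing off,
 *		    kernel tracing still armed
 */
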
int side_tracer_callback_unregister(struct side_event_description *desc,
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			void *priv),
		void *priv)
{
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec_description *sav_desc,
			const struct side_arg_dynamic_event_struct *var_struct,
			void *priv),
		void *priv)
{
	if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
		return SIDE_ERROR_INVAL;
	return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

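/*
 * Usage sketch (hypothetical caller code, not part of the library):
 * registering and unregistering a non-variadic callback, checking the
 * error codes used in this file. "my_event_desc", "example_cb" and
 * "handle_error" are placeholder names.
 *
 *	uint64_t hits = 0;
 *	int ret;
 *
 *	ret = side_tracer_callback_register(my_event_desc, example_cb, &hits);
 *	if (ret != SIDE_ERROR_OK)
 *		handle_error(ret);	// e.g. SIDE_ERROR_EXIST on duplicate (call, priv)
 *	...
 *	ret = side_tracer_callback_unregister(my_event_desc, example_cb, &hits);
 *	if (ret != SIDE_ERROR_OK)
 *		handle_error(ret);	// SIDE_ERROR_NOENT if never registered
 */
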
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

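/*
 * Usage sketch (hypothetical instrumented application): event
 * description arrays are normally emitted by the side/trace.h
 * instrumentation macros; "my_events" and "my_nr_events" are
 * placeholders for such generated symbols.
 *
 *	static struct side_events_register_handle *handle;
 *
 *	void my_lib_init(void)
 *	{
 *		handle = side_events_register(my_events, my_nr_events);
 *	}
 *
 *	void my_lib_exit(void)
 *	{
 *		side_events_unregister(handle);
 *	}
 */
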
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	uint32_t nr_cb = desc->nr_callbacks;
	struct side_callback *old_cb;

	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) desc->callbacks;
	(void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Reset the state to 0 callbacks and an empty callback array
	 * out of caution. This should not matter because the
	 * instrumentation is unreachable.
	 */
	desc->nr_callbacks = 0;
	side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_lock);
}

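/*
 * Sketch of a tracer-side notification callback (hypothetical names).
 * Note that the callback runs with side_lock held (see the call sites
 * above), so it must not call side_tracer_callback_register() or other
 * side_lock-taking APIs directly; record the batch and attach/detach
 * per-event callbacks from another context instead. "my_tracer" and
 * the queue helpers are placeholders.
 *
 *	static void my_notif_cb(enum side_tracer_notification notif,
 *			struct side_event_description **events,
 *			uint32_t nr_events, void *priv)
 *	{
 *		struct my_tracer *tracer = priv;
 *
 *		if (notif == SIDE_TRACER_NOTIFICATION_INSERT_EVENTS)
 *			my_tracer_queue_attach(tracer, events, nr_events);
 *		else
 *			my_tracer_queue_detach(tracer, events, nr_events);
 *	}
 */
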
void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_rcu_gp_exit(&rcu_gp);
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	finalized = true;
}