// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <side/trace.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

#include "rcu.h"
#include "list.h"
#include "rculist.h"

/* Top 8 bits reserved for shared tracer use. */
#if SIDE_BITS_PER_LONG == 64
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF00000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x8000000000000000ULL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x4000000000000000ULL

/* Allow 2^56 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFFFFFFFFFULL
#else
# define SIDE_EVENT_ENABLED_SHARED_MASK			0xFF000000UL
# define SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK	0x80000000UL
# define SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK		0x40000000UL

/* Allow 2^24 private tracer references on an event. */
# define SIDE_EVENT_ENABLED_PRIVATE_MASK		0x00FFFFFFUL
#endif
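
/*
 * Illustrative layout of the 64-bit "enabled" word implied by the
 * masks above (the 32-bit case is analogous, with bits 31 and 30
 * shared and a 24-bit private counter):
 *
 *   bit 63     : shared user event enable (set by the kernel)
 *   bit 62     : shared ptrace enable (set by a debugger)
 *   bits 56-61 : remaining reserved shared bits
 *   bits 0-55  : private count of tracer references on the event
 */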
30 | ||
871851e7 MD |
31 | /* Key 0x1 is reserved for user event. */ |
32 | #define SIDE_USER_EVENT_KEY ((void *)0x1UL) | |
33 | /* Key 0x2 is reserved for ptrace. */ | |
34 | #define SIDE_PTRACE_KEY ((void *)0x2UL) | |
35 | ||
struct side_events_register_handle {
	struct side_list_node node;
	struct side_event_description **events;
	uint32_t nr_events;
};

struct side_tracer_handle {
	struct side_list_node node;
	void (*cb)(enum side_tracer_notification notif,
		struct side_event_description **events, uint32_t nr_events, void *priv);
	void *priv;
};

struct side_statedump_request_handle {
	struct side_list_node node;	/* RCU list. */
	void (*cb)(void);
};

struct side_callback {
	union {
		void (*call)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			void *priv);
		void (*call_variadic)(const struct side_event_description *desc,
			const struct side_arg_vec *side_arg_vec,
			const struct side_arg_dynamic_struct *var_struct,
			void *priv);
	} u;
	void *priv;
	void *key;
};
67 | ||
873bbf16 | 68 | static struct side_rcu_gp_state event_rcu_gp, statedump_rcu_gp; |
67337c4a MD |
69 | |
70 | /* | |
71 | * Lazy initialization for early use within library constructors. | |
72 | */ | |
73 | static bool initialized; | |
74 | /* | |
75 | * Do not register/unregister any more events after destructor. | |
76 | */ | |
77 | static bool finalized; | |
78 | ||
79 | /* | |
80 | * Recursive mutex to allow tracer callbacks to use the side API. | |
81 | */ | |
873bbf16 MD |
82 | static pthread_mutex_t side_event_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; |
83 | static pthread_mutex_t side_statedump_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; | |
67337c4a MD |
84 | |
85 | static DEFINE_SIDE_LIST_HEAD(side_events_list); | |
86 | static DEFINE_SIDE_LIST_HEAD(side_tracer_list); | |
f0b01832 | 87 | static DEFINE_SIDE_LIST_HEAD(side_statedump_list); |
67337c4a | 88 | |
74be90b7 MD |
89 | /* |
90 | * Callback filter key for state dump. | |
91 | */ | |
92 | static __thread void *filter_key; | |
93 | ||
/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const char side_empty_callback[sizeof(struct side_callback)];

/*
 * side_ptrace_hook is a placeholder for a debugger breakpoint.
 * var_struct is NULL if not variadic.
 */
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
	__attribute__((noinline));
void side_ptrace_hook(const struct side_event_state *event_state __attribute__((unused)),
		const struct side_arg_vec *side_arg_vec __attribute__((unused)),
		const struct side_arg_dynamic_struct *var_struct __attribute__((unused)))
{
}
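
/*
 * For example, a debugger could observe ptrace-enabled events by
 * setting a breakpoint on this symbol; the noinline attribute keeps
 * the symbol (and its arguments) targetable:
 *
 *   (gdb) break side_ptrace_hook
 */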
113 | ||
static
void _side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec, void *key)
{
	struct side_rcu_read_state rcu_read_state;
	const struct side_event_state_0 *es0;
	const struct side_callback *side_cb;
	uintptr_t enabled;

	if (side_unlikely(finalized))
		return;
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	assert(!(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC));
	enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) {
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) &&
		    (!key || key == SIDE_USER_EVENT_KEY)) {
			// TODO: call kernel write.
		}
		if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) &&
		    (!key || key == SIDE_PTRACE_KEY))
			side_ptrace_hook(event_state, side_arg_vec, NULL);
	}
	side_rcu_read_begin(&event_rcu_gp, &rcu_read_state);
	for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call != NULL; side_cb++) {
		/* A NULL key is always a match. */
		if (key && side_cb->key && side_cb->key != key)
			continue;
		side_cb->u.call(es0->desc, side_arg_vec, side_cb->priv);
	}
	side_rcu_read_end(&event_rcu_gp, &rcu_read_state);
}
149 | ||
92c377f9 MD |
150 | void side_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec) |
151 | { | |
74be90b7 | 152 | _side_call(event_state, side_arg_vec, NULL); |
92c377f9 MD |
153 | } |
154 | ||
f0b01832 | 155 | void side_statedump_call(const struct side_event_state *event_state, const struct side_arg_vec *side_arg_vec) |
74be90b7 MD |
156 | { |
157 | _side_call(event_state, side_arg_vec, filter_key); | |
158 | } | |
159 | ||
160 | static | |
161 | void _side_call_variadic(const struct side_event_state *event_state, | |
67337c4a | 162 | const struct side_arg_vec *side_arg_vec, |
92c377f9 MD |
163 | const struct side_arg_dynamic_struct *var_struct, |
164 | void *key) | |
67337c4a MD |
165 | { |
166 | struct side_rcu_read_state rcu_read_state; | |
b2a84b9f | 167 | const struct side_event_state_0 *es0; |
67337c4a MD |
168 | const struct side_callback *side_cb; |
169 | uintptr_t enabled; | |
170 | ||
171 | if (side_unlikely(finalized)) | |
172 | return; | |
173 | if (side_unlikely(!initialized)) | |
174 | side_init(); | |
b2a84b9f MD |
175 | if (side_unlikely(event_state->version != 0)) |
176 | abort(); | |
49aea3ef | 177 | es0 = side_container_of(event_state, const struct side_event_state_0, parent); |
7269a8a3 | 178 | assert(es0->desc->flags & SIDE_EVENT_FLAG_VARIADIC); |
b2a84b9f | 179 | enabled = __atomic_load_n(&es0->enabled, __ATOMIC_RELAXED); |
871851e7 MD |
180 | if (side_unlikely(enabled & SIDE_EVENT_ENABLED_SHARED_MASK)) { |
181 | if ((enabled & SIDE_EVENT_ENABLED_SHARED_USER_EVENT_MASK) && | |
182 | (!key || key == SIDE_USER_EVENT_KEY)) { | |
183 | // TODO: call kernel write. | |
184 | } | |
185 | if ((enabled & SIDE_EVENT_ENABLED_SHARED_PTRACE_MASK) && | |
186 | (!key || key == SIDE_PTRACE_KEY)) | |
187 | side_ptrace_hook(event_state, side_arg_vec, var_struct); | |
67337c4a | 188 | } |
873bbf16 | 189 | side_rcu_read_begin(&event_rcu_gp, &rcu_read_state); |
92c377f9 MD |
190 | for (side_cb = side_rcu_dereference(es0->callbacks); side_cb->u.call_variadic != NULL; side_cb++) { |
191 | /* A NULL key is always a match. */ | |
192 | if (key && side_cb->key && side_cb->key != key) | |
193 | continue; | |
7269a8a3 | 194 | side_cb->u.call_variadic(es0->desc, side_arg_vec, var_struct, side_cb->priv); |
92c377f9 | 195 | } |
873bbf16 | 196 | side_rcu_read_end(&event_rcu_gp, &rcu_read_state); |
67337c4a MD |
197 | } |
198 | ||
92c377f9 MD |
199 | void side_call_variadic(const struct side_event_state *event_state, |
200 | const struct side_arg_vec *side_arg_vec, | |
201 | const struct side_arg_dynamic_struct *var_struct) | |
202 | { | |
74be90b7 MD |
203 | _side_call_variadic(event_state, side_arg_vec, var_struct, NULL); |
204 | } | |
205 | ||
f0b01832 | 206 | void side_statedump_call_variadic(const struct side_event_state *event_state, |
74be90b7 MD |
207 | const struct side_arg_vec *side_arg_vec, |
208 | const struct side_arg_dynamic_struct *var_struct) | |
209 | { | |
210 | _side_call_variadic(event_state, side_arg_vec, var_struct, filter_key); | |
92c377f9 MD |
211 | } |
212 | ||
static
const struct side_callback *side_tracer_callback_lookup(
		const struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	const struct side_event_state_0 *es0;
	const struct side_callback *cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, const struct side_event_state_0, parent);
	for (cb = es0->callbacks; cb->u.call != NULL; cb++) {
		if ((void *) cb->u.call == call && cb->priv == priv && cb->key == key)
			return cb;
	}
	return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	struct side_event_state_0 *es0;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	old_nr_cb = es0->nr_callbacks;
	if (old_nr_cb == UINT32_MAX) {
		ret = SIDE_ERROR_INVAL;
		goto unlock;
	}
	/* Reject duplicate (call, priv, key) tuples. */
	if (side_tracer_callback_lookup(desc, call, priv, key)) {
		ret = SIDE_ERROR_EXIST;
		goto unlock;
	}
	old_cb = (struct side_callback *) es0->callbacks;
	/* old_nr_cb + 1 (new cb) + 1 (NULL) */
	new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
	if (!new_cb) {
		ret = SIDE_ERROR_NOMEM;
		goto unlock;
	}
	memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
	if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
		new_cb[old_nr_cb].u.call_variadic =
			(side_tracer_callback_variadic_func) call;
	else
		new_cb[old_nr_cb].u.call =
			(side_tracer_callback_func) call;
	new_cb[old_nr_cb].priv = priv;
	new_cb[old_nr_cb].key = key;
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	if (old_nr_cb)
		free(old_cb);
	es0->nr_callbacks++;
	/* Increment concurrently with kernel setting the top bits. */
	if (!old_nr_cb)
		(void) __atomic_add_fetch(&es0->enabled, 1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}
292 | ||
293 | int side_tracer_callback_register(struct side_event_description *desc, | |
294 | side_tracer_callback_func call, | |
92c377f9 | 295 | void *priv, void *key) |
67337c4a MD |
296 | { |
297 | if (desc->flags & SIDE_EVENT_FLAG_VARIADIC) | |
298 | return SIDE_ERROR_INVAL; | |
92c377f9 | 299 | return _side_tracer_callback_register(desc, (void *) call, priv, key); |
67337c4a MD |
300 | } |
301 | ||
302 | int side_tracer_callback_variadic_register(struct side_event_description *desc, | |
303 | side_tracer_callback_variadic_func call_variadic, | |
92c377f9 | 304 | void *priv, void *key) |
67337c4a MD |
305 | { |
306 | if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) | |
307 | return SIDE_ERROR_INVAL; | |
92c377f9 | 308 | return _side_tracer_callback_register(desc, (void *) call_variadic, priv, key); |
67337c4a MD |
309 | } |
310 | ||
static int _side_tracer_callback_unregister(struct side_event_description *desc,
		void *call, void *priv, void *key)
{
	struct side_event_state *event_state;
	struct side_callback *old_cb, *new_cb;
	const struct side_callback *cb_pos;
	struct side_event_state_0 *es0;
	uint32_t pos_idx;
	int ret = SIDE_ERROR_OK;
	uint32_t old_nr_cb;

	if (!call)
		return SIDE_ERROR_INVAL;
	if (finalized)
		return SIDE_ERROR_EXITING;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	event_state = side_ptr_get(desc->state);
	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	cb_pos = side_tracer_callback_lookup(desc, call, priv, key);
	if (!cb_pos) {
		ret = SIDE_ERROR_NOENT;
		goto unlock;
	}
	old_nr_cb = es0->nr_callbacks;
	old_cb = (struct side_callback *) es0->callbacks;
	if (old_nr_cb == 1) {
		new_cb = (struct side_callback *) &side_empty_callback;
	} else {
		pos_idx = cb_pos - es0->callbacks;
		/* Remove entry at pos_idx. */
		/* old_nr_cb - 1 (removed cb) + 1 (NULL) */
		new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
		if (!new_cb) {
			ret = SIDE_ERROR_NOMEM;
			goto unlock;
		}
		memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
		memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
			(old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
	}
	/* High order bits are already zeroed. */
	side_rcu_assign_pointer(es0->callbacks, new_cb);
	side_rcu_wait_grace_period(&event_rcu_gp);
	free(old_cb);
	es0->nr_callbacks--;
	/* Decrement concurrently with kernel setting the top bits. */
	if (old_nr_cb == 1)
		(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
unlock:
	pthread_mutex_unlock(&side_event_lock);
	return ret;
}
366 | ||
367 | int side_tracer_callback_unregister(struct side_event_description *desc, | |
368 | side_tracer_callback_func call, | |
92c377f9 | 369 | void *priv, void *key) |
67337c4a MD |
370 | { |
371 | if (desc->flags & SIDE_EVENT_FLAG_VARIADIC) | |
372 | return SIDE_ERROR_INVAL; | |
92c377f9 | 373 | return _side_tracer_callback_unregister(desc, (void *) call, priv, key); |
67337c4a MD |
374 | } |
375 | ||
376 | int side_tracer_callback_variadic_unregister(struct side_event_description *desc, | |
377 | side_tracer_callback_variadic_func call_variadic, | |
92c377f9 | 378 | void *priv, void *key) |
67337c4a MD |
379 | { |
380 | if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) | |
381 | return SIDE_ERROR_INVAL; | |
92c377f9 | 382 | return _side_tracer_callback_unregister(desc, (void *) call_variadic, priv, key); |
67337c4a MD |
383 | } |
384 | ||
struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
	struct side_events_register_handle *events_handle = NULL;
	struct side_tracer_handle *tracer_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	events_handle = (struct side_events_register_handle *)
		calloc(1, sizeof(struct side_events_register_handle));
	if (!events_handle)
		return NULL;
	events_handle->events = events;
	events_handle->nr_events = nr_events;

	pthread_mutex_lock(&side_event_lock);
	side_list_insert_node_tail(&side_events_list, &events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events, nr_events, tracer_handle->priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch register ioctl
	return events_handle;
}

static
void side_event_remove_callbacks(struct side_event_description *desc)
{
	struct side_event_state *event_state = side_ptr_get(desc->state);
	struct side_event_state_0 *es0;
	struct side_callback *old_cb;
	uint32_t nr_cb;

	if (side_unlikely(event_state->version != 0))
		abort();
	es0 = side_container_of(event_state, struct side_event_state_0, parent);
	nr_cb = es0->nr_callbacks;
	if (!nr_cb)
		return;
	old_cb = (struct side_callback *) es0->callbacks;
	(void) __atomic_add_fetch(&es0->enabled, -1, __ATOMIC_RELAXED);
	/*
	 * Setting the state back to 0 cb and empty callbacks out of
	 * caution. This should not matter because instrumentation is
	 * unreachable.
	 */
	es0->nr_callbacks = 0;
	side_rcu_assign_pointer(es0->callbacks, &side_empty_callback);
	/*
	 * No need to wait for grace period because instrumentation is
	 * unreachable.
	 */
	free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *events_handle)
{
	struct side_tracer_handle *tracer_handle;
	uint32_t i;

	if (!events_handle)
		return;
	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_remove_node(&events_handle->node);
	side_list_for_each_entry(tracer_handle, &side_tracer_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	for (i = 0; i < events_handle->nr_events; i++) {
		struct side_event_description *event = events_handle->events[i];

		/* Skip NULL pointers. */
		if (!event)
			continue;
		side_event_remove_callbacks(event);
	}
	pthread_mutex_unlock(&side_event_lock);
	//TODO: call event batch unregister ioctl
	free(events_handle);
}

struct side_tracer_handle *side_tracer_event_notification_register(
		void (*cb)(enum side_tracer_notification notif,
			struct side_event_description **events, uint32_t nr_events, void *priv),
		void *priv)
{
	struct side_tracer_handle *tracer_handle;
	struct side_events_register_handle *events_handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	tracer_handle = (struct side_tracer_handle *)
		calloc(1, sizeof(struct side_tracer_handle));
	if (!tracer_handle)
		return NULL;
	pthread_mutex_lock(&side_event_lock);
	tracer_handle->cb = cb;
	tracer_handle->priv = priv;
	side_list_insert_node_tail(&side_tracer_list, &tracer_handle->node);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		cb(SIDE_TRACER_NOTIFICATION_INSERT_EVENTS,
			events_handle->events, events_handle->nr_events, priv);
	}
	pthread_mutex_unlock(&side_event_lock);
	return tracer_handle;
}

void side_tracer_event_notification_unregister(struct side_tracer_handle *tracer_handle)
{
	struct side_events_register_handle *events_handle;

	if (finalized)
		return;
	if (!initialized)
		side_init();
	pthread_mutex_lock(&side_event_lock);
	side_list_for_each_entry(events_handle, &side_events_list, node) {
		tracer_handle->cb(SIDE_TRACER_NOTIFICATION_REMOVE_EVENTS,
			events_handle->events, events_handle->nr_events,
			tracer_handle->priv);
	}
	side_list_remove_node(&tracer_handle->node);
	pthread_mutex_unlock(&side_event_lock);
	free(tracer_handle);
}

struct side_statedump_request_handle *side_statedump_request_notification_register(void (*statedump_cb)(void))
{
	struct side_statedump_request_handle *handle;

	if (finalized)
		return NULL;
	if (!initialized)
		side_init();
	/*
	 * The statedump request notification should not be registered
	 * from a notification callback.
	 */
	assert(filter_key == NULL);
	handle = (struct side_statedump_request_handle *)
		calloc(1, sizeof(struct side_statedump_request_handle));
	if (!handle)
		return NULL;
	handle->cb = statedump_cb;

	pthread_mutex_lock(&side_statedump_lock);
	side_list_insert_node_tail_rcu(&side_statedump_list, &handle->node);
	pthread_mutex_unlock(&side_statedump_lock);

	/* Invoke callback for all tracers. */
	statedump_cb();

	return handle;
}

void side_statedump_request_notification_unregister(struct side_statedump_request_handle *handle)
{
	if (finalized)
		return;
	if (!initialized)
		side_init();
	assert(filter_key == NULL);

	pthread_mutex_lock(&side_statedump_lock);
	side_list_remove_node_rcu(&handle->node);
	pthread_mutex_unlock(&side_statedump_lock);

	side_rcu_wait_grace_period(&statedump_rcu_gp);
	free(handle);
}

void side_tracer_statedump_request(void *key)
{
	struct side_statedump_request_handle *handle;
	struct side_rcu_read_state rcu_read_state;

	/* Invoke the state dump callbacks specifically for the tracer key. */
	filter_key = key;
	side_rcu_read_begin(&statedump_rcu_gp, &rcu_read_state);
	side_list_for_each_entry_rcu(handle, &side_statedump_list, node)
		handle->cb();
	side_rcu_read_end(&statedump_rcu_gp, &rcu_read_state);
	filter_key = NULL;
}

void side_init(void)
{
	if (initialized)
		return;
	side_rcu_gp_init(&event_rcu_gp);
	side_rcu_gp_init(&statedump_rcu_gp);
	initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
	struct side_events_register_handle *handle, *tmp;

	if (finalized)
		return;
	side_list_for_each_entry_safe(handle, tmp, &side_events_list, node)
		side_events_unregister(handle);
	side_rcu_gp_exit(&event_rcu_gp);
	side_rcu_gp_exit(&statedump_rcu_gp);
	finalized = true;
}