// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include <side/trace.h>
/* Internal RCU and list helpers; header names are project-specific. */
#include "rcu.h"
#include "list.h"

/* Top 8 bits reserved for kernel tracer use. */
#define SIDE_EVENT_ENABLED_KERNEL_MASK			0xFF000000
#define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK	0x80000000

/* Allow 2^24 tracer callbacks to be registered on an event. */
#define SIDE_EVENT_ENABLED_USER_MASK			0x00FFFFFF

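/*
 * Example (sketch): with the masks above, an "enabled" word of
 * 0x80000003 means the kernel user event is attached (bit 31 set) and
 * three user-space callbacks are registered (the low 24 bits hold the
 * callback count maintained by the register/unregister paths below).
 */
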
struct side_events_register_handle {
        struct side_list_node node;
        struct side_event_description **events;
        uint32_t nr_events;
};

static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;
/*
 * Do not register/unregister any more events after destructor.
 */
static bool finalized;

static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

static DEFINE_SIDE_LIST_HEAD(side_list);

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;

void side_init(void) __attribute__((constructor));
void side_exit(void) __attribute__((destructor));

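/*
 * Note: side_empty_callback is zero-initialized, so its function
 * pointer member is NULL. When an event's desc->callbacks points at it
 * (e.g. after its last callback has been removed), the callback
 * iteration in side_call()/side_call_variadic() below terminates
 * immediately.
 */
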
void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
        const struct side_callback *side_cb;
        unsigned int rcu_period;
        uint32_t enabled;

        if (side_unlikely(finalized))
                return;
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
                printf("ERROR: unexpected variadic event description\n");
                abort();
        }
        enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
        if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
                // TODO: call kernel write.
        }
        if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
                return;

        //TODO: replace tracer_call by rcu iteration on list of registered callbacks
        tracer_call(desc, sav_desc, NULL);

        rcu_period = side_rcu_read_begin(&rcu_gp);
        for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
                side_cb->u.call(desc, sav_desc, side_cb->priv);
        side_rcu_read_end(&rcu_gp, rcu_period);
}

void side_call_variadic(const struct side_event_description *desc,
        const struct side_arg_vec_description *sav_desc,
        const struct side_arg_dynamic_event_struct *var_struct)
{
        const struct side_callback *side_cb;
        unsigned int rcu_period;
        uint32_t enabled;

        if (side_unlikely(finalized))
                return;
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
                printf("ERROR: unexpected non-variadic event description\n");
                abort();
        }
        enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
        if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
                // TODO: call kernel write.
        }
        if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
                return;

        //TODO: replace tracer_call by rcu iteration on list of registered callbacks
        tracer_call_variadic(desc, sav_desc, var_struct, NULL);

        rcu_period = side_rcu_read_begin(&rcu_gp);
        for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
                side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
        side_rcu_read_end(&rcu_gp, rcu_period);
}

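/*
 * The callback array is managed with a copy-on-write scheme: the
 * updaters below build a new NULL-terminated array under side_lock,
 * publish it with side_rcu_assign_pointer(), wait for a grace period,
 * and only then free the old array. The fast paths above therefore
 * always iterate over a consistent snapshot of the callbacks.
 */
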
static
const struct side_callback *side_tracer_callback_lookup(
                const struct side_event_description *desc,
                void (*call)(), void *priv)
{
        const struct side_callback *cb;

        for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
                if (cb->u.call == call && cb->priv == priv)
                        return cb;
        }
        return NULL;
}

static
int _side_tracer_callback_register(struct side_event_description *desc,
                void (*call)(), void *priv)
{
        struct side_callback *old_cb, *new_cb;
        int ret = SIDE_ERROR_OK;
        uint32_t old_nr_cb;

        if (!call)
                return SIDE_ERROR_INVAL;
        if (finalized)
                return SIDE_ERROR_EXITING;
        pthread_mutex_lock(&side_lock);
        old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
        if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
                ret = SIDE_ERROR_INVAL;
                goto unlock;
        }
        /* Reject duplicate (call, priv) tuples. */
        if (side_tracer_callback_lookup(desc, call, priv)) {
                ret = SIDE_ERROR_EXIST;
                goto unlock;
        }
        old_cb = (struct side_callback *) desc->callbacks;
        /* old_nr_cb + 1 (new cb) + 1 (NULL) */
        new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
        if (!new_cb) {
                ret = SIDE_ERROR_NOMEM;
                goto unlock;
        }
        memcpy(new_cb, old_cb, old_nr_cb * sizeof(struct side_callback));
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                new_cb[old_nr_cb].u.call_variadic = call;
        else
                new_cb[old_nr_cb].u.call = call;
        new_cb[old_nr_cb].priv = priv;
        side_rcu_assign_pointer(desc->callbacks, new_cb);
        side_rcu_wait_grace_period(&rcu_gp);
        if (old_nr_cb)
                free(old_cb);
        /* Increment concurrently with kernel setting the top bits. */
        (void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
unlock:
        pthread_mutex_unlock(&side_lock);
        return ret;
}

int side_tracer_callback_register(struct side_event_description *desc,
                void (*call)(const struct side_event_description *desc,
                        const struct side_arg_vec_description *sav_desc,
                        void *priv),
                void *priv)
{
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_register(desc, call, priv);
}

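/*
 * Usage sketch (hypothetical tracer code, not part of this file;
 * my_tracer_call, event_desc and my_priv are illustrative names only):
 *
 *	static void my_tracer_call(const struct side_event_description *desc,
 *			const struct side_arg_vec_description *sav_desc,
 *			void *priv)
 *	{
 *		// consume the event payload
 *	}
 *
 *	ret = side_tracer_callback_register(event_desc, my_tracer_call, my_priv);
 *	// ... later ...
 *	ret = side_tracer_callback_unregister(event_desc, my_tracer_call, my_priv);
 */
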
int side_tracer_callback_variadic_register(struct side_event_description *desc,
                void (*call_variadic)(const struct side_event_description *desc,
                        const struct side_arg_vec_description *sav_desc,
                        const struct side_arg_dynamic_event_struct *var_struct,
                        void *priv),
                void *priv)
{
        if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_register(desc, call_variadic, priv);
}

static
int _side_tracer_callback_unregister(struct side_event_description *desc,
                void (*call)(), void *priv)
{
        struct side_callback *old_cb, *new_cb;
        const struct side_callback *cb_pos;
        uint32_t pos_idx;
        int ret = SIDE_ERROR_OK;
        uint32_t old_nr_cb;

        if (!call)
                return SIDE_ERROR_INVAL;
        if (finalized)
                return SIDE_ERROR_EXITING;
        pthread_mutex_lock(&side_lock);
        cb_pos = side_tracer_callback_lookup(desc, call, priv);
        if (!cb_pos) {
                ret = SIDE_ERROR_NOENT;
                goto unlock;
        }
        old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
        old_cb = (struct side_callback *) desc->callbacks;
        if (old_nr_cb == 1) {
                /* Removing the last callback: fall back to the empty sentinel. */
                new_cb = (struct side_callback *) &side_empty_callback;
        } else {
                pos_idx = cb_pos - desc->callbacks;
                /* Remove entry at pos_idx. */
                /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
                new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
                if (!new_cb) {
                        ret = SIDE_ERROR_NOMEM;
                        goto unlock;
                }
                memcpy(new_cb, old_cb, pos_idx * sizeof(struct side_callback));
                memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1],
                        (old_nr_cb - pos_idx - 1) * sizeof(struct side_callback));
        }
        side_rcu_assign_pointer(desc->callbacks, new_cb);
        side_rcu_wait_grace_period(&rcu_gp);
        free(old_cb);
        /* Decrement concurrently with kernel setting the top bits. */
        (void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
unlock:
        pthread_mutex_unlock(&side_lock);
        return ret;
}

int side_tracer_callback_unregister(struct side_event_description *desc,
                void (*call)(const struct side_event_description *desc,
                        const struct side_arg_vec_description *sav_desc,
                        void *priv),
                void *priv)
{
        if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_unregister(desc, call, priv);
}

int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
                void (*call_variadic)(const struct side_event_description *desc,
                        const struct side_arg_vec_description *sav_desc,
                        const struct side_arg_dynamic_event_struct *var_struct,
                        void *priv),
                void *priv)
{
        if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
                return SIDE_ERROR_INVAL;
        return _side_tracer_callback_unregister(desc, call_variadic, priv);
}

struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
{
        struct side_events_register_handle *handle = NULL;

        if (finalized)
                return NULL;
        handle = calloc(1, sizeof(struct side_events_register_handle));
        if (!handle)
                return NULL;
        pthread_mutex_lock(&side_lock);
        handle->events = events;
        handle->nr_events = nr_events;
        side_list_insert_node_tail(&side_list, &handle->node);
        pthread_mutex_unlock(&side_lock);
        //TODO: call event batch register ioctl
        return handle;
}

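/*
 * Usage sketch (hypothetical instrumented code; my_events and
 * nr_my_events are illustrative names only):
 *
 *	struct side_events_register_handle *handle;
 *
 *	handle = side_events_register(my_events, nr_my_events);
 *	// ... run with instrumentation reachable ...
 *	side_events_unregister(handle);
 */
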
static
void side_event_remove_callbacks(struct side_event_description *desc)
{
        uint32_t nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
        struct side_callback *old_cb;

        if (!nr_cb)
                return;
        old_cb = (struct side_callback *) desc->callbacks;
        /*
         * Setting the state back to 0 cb and empty callbacks out of
         * caution. This should not matter because instrumentation is
         * unreachable.
         */
        (void) __atomic_add_fetch(desc->enabled, -nr_cb, __ATOMIC_RELAXED);
        side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
        /*
         * No need to wait for grace period because instrumentation is
         * unreachable.
         */
        free(old_cb);
}

/*
 * Unregister event handle. At this point, all side events in that
 * handle should be unreachable.
 */
void side_events_unregister(struct side_events_register_handle *handle)
{
        uint32_t i;

        if (finalized)
                return;
        pthread_mutex_lock(&side_lock);
        side_list_remove_node(&handle->node);
        for (i = 0; i < handle->nr_events; i++) {
                struct side_event_description *event = handle->events[i];

                /* Skip NULL pointers */
                if (!event)
                        continue;
                side_event_remove_callbacks(event);
        }
        pthread_mutex_unlock(&side_lock);
        //TODO: call event batch unregister ioctl
        free(handle);
}

void side_init(void)
{
        if (initialized)
                return;
        side_rcu_gp_init(&rcu_gp);
        initialized = true;
}

/*
 * side_exit() is executed from a library destructor. It can be called
 * explicitly at application exit as well. Concurrent side API use is
 * not expected at that point.
 */
void side_exit(void)
{
        struct side_events_register_handle *handle, *tmp;

        if (finalized)
                return;
        side_rcu_gp_exit(&rcu_gp);
        side_list_for_each_entry_safe(handle, tmp, &side_list, node)
                side_events_unregister(handle);
        finalized = true;
}