// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <side/trace.h>

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
13 /* Top 8 bits reserved for kernel tracer use. */
14 #define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF000000
15 #define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x80000000
17 /* Allow 2^24 tracer callbacks to be registered on an event. */
18 #define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFF
20 static struct side_rcu_gp_state rcu_gp
;
23 * Lazy initialization for early use within library constructors.
25 static bool initialized
;
27 static pthread_mutex_t side_lock
= PTHREAD_MUTEX_INITIALIZER
;
29 static DEFINE_SIDE_LIST_HEAD(side_list
);
33 __attribute__((constructor
));
36 * The empty callback has a NULL function callback pointer, which stops
37 * iteration on the array of callbacks immediately.
39 const struct side_callback side_empty_callback
;
41 void side_call(const struct side_event_description
*desc
, const struct side_arg_vec_description
*sav_desc
)
43 const struct side_callback
*side_cb
;
44 unsigned int rcu_period
;
47 if (side_unlikely(!initialized
))
49 if (side_unlikely(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)) {
50 printf("ERROR: unexpected variadic event description\n");
53 enabled
= __atomic_load_n(desc
->enabled
, __ATOMIC_RELAXED
);
54 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK
)) {
55 // TODO: call kernel write.
57 if (side_unlikely(!(enabled
& SIDE_EVENT_ENABLED_USER_MASK
)))
60 //TODO: replace tracer_call by rcu iteration on list of registered callbacks
61 tracer_call(desc
, sav_desc
, NULL
);
63 rcu_period
= side_rcu_read_begin(&rcu_gp
);
64 for (side_cb
= side_rcu_dereference(desc
->callbacks
); side_cb
->u
.call
!= NULL
; side_cb
++)
65 side_cb
->u
.call(desc
, sav_desc
, side_cb
->priv
);
66 side_rcu_read_end(&rcu_gp
, rcu_period
);
69 void side_call_variadic(const struct side_event_description
*desc
,
70 const struct side_arg_vec_description
*sav_desc
,
71 const struct side_arg_dynamic_event_struct
*var_struct
)
73 const struct side_callback
*side_cb
;
74 unsigned int rcu_period
;
77 if (side_unlikely(!initialized
))
79 if (side_unlikely(!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))) {
80 printf("ERROR: unexpected non-variadic event description\n");
83 enabled
= __atomic_load_n(desc
->enabled
, __ATOMIC_RELAXED
);
84 if (side_unlikely(enabled
& SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK
)) {
85 // TODO: call kernel write.
87 if (side_unlikely(!(enabled
& SIDE_EVENT_ENABLED_USER_MASK
)))
90 //TODO: replace tracer_call by rcu iteration on list of registered callbacks
91 tracer_call_variadic(desc
, sav_desc
, var_struct
, NULL
);
93 rcu_period
= side_rcu_read_begin(&rcu_gp
);
94 for (side_cb
= side_rcu_dereference(desc
->callbacks
); side_cb
->u
.call_variadic
!= NULL
; side_cb
++)
95 side_cb
->u
.call_variadic(desc
, sav_desc
, var_struct
, side_cb
->priv
);
96 side_rcu_read_end(&rcu_gp
, rcu_period
);
100 const struct side_callback
*side_tracer_callback_lookup(
101 const struct side_event_description
*desc
,
102 void (*call
)(), void *priv
)
104 const struct side_callback
*cb
;
106 for (cb
= desc
->callbacks
; cb
->u
.call
!= NULL
; cb
++) {
107 if (cb
->u
.call
== call
&& cb
->priv
== priv
)
114 int _side_tracer_callback_register(struct side_event_description
*desc
,
115 void (*call
)(), void *priv
)
117 struct side_callback
*old_cb
, *new_cb
;
118 int ret
= SIDE_ERROR_OK
;
122 return SIDE_ERROR_INVAL
;
123 pthread_mutex_lock(&side_lock
);
124 old_nr_cb
= *desc
->enabled
& SIDE_EVENT_ENABLED_USER_MASK
;
125 if (old_nr_cb
== SIDE_EVENT_ENABLED_USER_MASK
) {
126 ret
= SIDE_ERROR_INVAL
;
129 /* Reject duplicate (call, priv) tuples. */
130 if (side_tracer_callback_lookup(desc
, call
, priv
)) {
131 ret
= SIDE_ERROR_EXIST
;
134 old_cb
= (struct side_callback
*) desc
->callbacks
;
135 /* old_nr_cb + 1 (new cb) + 1 (NULL) */
136 new_cb
= (struct side_callback
*) calloc(old_nr_cb
+ 2, sizeof(struct side_callback
));
138 ret
= SIDE_ERROR_NOMEM
;
141 memcpy(new_cb
, old_cb
, old_nr_cb
);
142 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
143 new_cb
[old_nr_cb
].u
.call_variadic
= call
;
145 new_cb
[old_nr_cb
].u
.call
= call
;
146 new_cb
[old_nr_cb
].priv
= priv
;
147 side_rcu_assign_pointer(desc
->callbacks
, new_cb
);
148 side_rcu_wait_grace_period(&rcu_gp
);
151 /* Increment concurrently with kernel setting the top bits. */
152 (void) __atomic_add_fetch(desc
->enabled
, 1, __ATOMIC_RELAXED
);
154 pthread_mutex_unlock(&side_lock
);
158 int side_tracer_callback_register(struct side_event_description
*desc
,
159 void (*call
)(const struct side_event_description
*desc
,
160 const struct side_arg_vec_description
*sav_desc
,
164 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
165 return SIDE_ERROR_INVAL
;
166 return _side_tracer_callback_register(desc
, call
, priv
);
169 int side_tracer_callback_variadic_register(struct side_event_description
*desc
,
170 void (*call_variadic
)(const struct side_event_description
*desc
,
171 const struct side_arg_vec_description
*sav_desc
,
172 const struct side_arg_dynamic_event_struct
*var_struct
,
176 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
177 return SIDE_ERROR_INVAL
;
178 return _side_tracer_callback_register(desc
, call_variadic
, priv
);
181 int _side_tracer_callback_unregister(struct side_event_description
*desc
,
182 void (*call
)(), void *priv
)
184 struct side_callback
*old_cb
, *new_cb
;
185 const struct side_callback
*cb_pos
;
187 int ret
= SIDE_ERROR_OK
;
191 return SIDE_ERROR_INVAL
;
192 pthread_mutex_lock(&side_lock
);
193 cb_pos
= side_tracer_callback_lookup(desc
, call
, priv
);
195 ret
= SIDE_ERROR_NOENT
;
198 old_nr_cb
= *desc
->enabled
& SIDE_EVENT_ENABLED_USER_MASK
;
199 old_cb
= (struct side_callback
*) desc
->callbacks
;
200 if (old_nr_cb
== 1) {
201 new_cb
= (struct side_callback
*) &side_empty_callback
;
203 pos_idx
= cb_pos
- desc
->callbacks
;
204 /* Remove entry at pos_idx. */
205 /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
206 new_cb
= (struct side_callback
*) calloc(old_nr_cb
, sizeof(struct side_callback
));
208 ret
= SIDE_ERROR_NOMEM
;
211 memcpy(new_cb
, old_cb
, pos_idx
);
212 memcpy(&new_cb
[pos_idx
], &old_cb
[pos_idx
+ 1], old_nr_cb
- pos_idx
- 1);
214 side_rcu_assign_pointer(desc
->callbacks
, new_cb
);
215 side_rcu_wait_grace_period(&rcu_gp
);
217 /* Decrement concurrently with kernel setting the top bits. */
218 (void) __atomic_add_fetch(desc
->enabled
, -1, __ATOMIC_RELAXED
);
220 pthread_mutex_unlock(&side_lock
);
224 int side_tracer_callback_unregister(struct side_event_description
*desc
,
225 void (*call
)(const struct side_event_description
*desc
,
226 const struct side_arg_vec_description
*sav_desc
,
230 if (desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
)
231 return SIDE_ERROR_INVAL
;
232 return _side_tracer_callback_unregister(desc
, call
, priv
);
235 int side_tracer_callback_variadic_unregister(struct side_event_description
*desc
,
236 void (*call_variadic
)(const struct side_event_description
*desc
,
237 const struct side_arg_vec_description
*sav_desc
,
238 const struct side_arg_dynamic_event_struct
*var_struct
,
242 if (!(desc
->flags
& SIDE_EVENT_FLAG_VARIADIC
))
243 return SIDE_ERROR_INVAL
;
244 return _side_tracer_callback_unregister(desc
, call_variadic
, priv
);
252 side_rcu_gp_init(&rcu_gp
);