SIDE instrumentation library — event call-site dispatch and tracer-callback registration
[libside.git] / src / side.c
1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 */
5
6 #include <side/trace.h>
7 #include <string.h>
8
9 #include "tracer.h"
10 #include "rcu.h"
11 #include "list.h"
12
13 /* Top 8 bits reserved for kernel tracer use. */
14 #define SIDE_EVENT_ENABLED_KERNEL_MASK 0xFF000000
15 #define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK 0x80000000
16
17 /* Allow 2^24 tracer callbacks to be registered on an event. */
18 #define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFF
19
/* RCU grace-period state shared by all event call sites in this process. */
static struct side_rcu_gp_state rcu_gp;

/*
 * Lazy initialization for early use within library constructors.
 */
static bool initialized;

/* Serializes tracer callback registration/unregistration across all events. */
static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;

/* NOTE(review): presumably the list of registered events; not referenced in this chunk — confirm against the rest of the file. */
static DEFINE_SIDE_LIST_HEAD(side_list);

/* Run side_init() automatically at library load time. */
static
void side_init(void)
	__attribute__((constructor));

/*
 * The empty callback has a NULL function callback pointer, which stops
 * iteration on the array of callbacks immediately.
 */
const struct side_callback side_empty_callback;
40
/*
 * Invoke all tracer callbacks registered on a non-variadic event.
 *
 * desc: event description; must NOT have SIDE_EVENT_FLAG_VARIADIC set
 *       (aborts otherwise).
 * sav_desc: description of the argument vector for this call site.
 *
 * Returns early (fast path) when no user-space callback is registered.
 */
void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	/* Constructors may trace before our own constructor ran. */
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
		printf("ERROR: unexpected variadic event description\n");
		abort();
	}
	/* Single racy load; top bits are owned by the kernel tracer. */
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	/* Low 24 bits count registered user-space callbacks. */
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call(desc, sav_desc, NULL);

	/*
	 * RCU read-side critical section: the callbacks array may be
	 * replaced concurrently by (un)registration; the grace period
	 * guarantees it stays live while we iterate.
	 */
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}
68
/*
 * Invoke all tracer callbacks registered on a variadic event.
 *
 * desc: event description; MUST have SIDE_EVENT_FLAG_VARIADIC set
 *       (aborts otherwise).
 * sav_desc: description of the static argument vector.
 * var_struct: description of the variadic (dynamic) arguments.
 *
 * Mirrors side_call(); see that function for the fast-path and RCU notes.
 */
void side_call_variadic(const struct side_event_description *desc,
	const struct side_arg_vec_description *sav_desc,
	const struct side_arg_dynamic_event_struct *var_struct)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;
	uint32_t enabled;

	/* Constructors may trace before our own constructor ran. */
	if (side_unlikely(!initialized))
		side_init();
	if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
		printf("ERROR: unexpected non-variadic event description\n");
		abort();
	}
	/* Single racy load; top bits are owned by the kernel tracer. */
	enabled = __atomic_load_n(desc->enabled, __ATOMIC_RELAXED);
	if (side_unlikely(enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
		// TODO: call kernel write.
	}
	/* Low 24 bits count registered user-space callbacks. */
	if (side_unlikely(!(enabled & SIDE_EVENT_ENABLED_USER_MASK)))
		return;

	//TODO: replace tracer_call by rcu iteration on list of registered callbacks
	tracer_call_variadic(desc, sav_desc, var_struct, NULL);

	/* RCU read-side critical section protecting the callbacks array. */
	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
		side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}
98
99 static
100 const struct side_callback *side_tracer_callback_lookup(
101 const struct side_event_description *desc,
102 void (*call)(), void *priv)
103 {
104 const struct side_callback *cb;
105
106 for (cb = desc->callbacks; cb->u.call != NULL; cb++) {
107 if (cb->u.call == call && cb->priv == priv)
108 return cb;
109 }
110 return NULL;
111 }
112
113 static
114 int _side_tracer_callback_register(struct side_event_description *desc,
115 void (*call)(), void *priv)
116 {
117 struct side_callback *old_cb, *new_cb;
118 int ret = SIDE_ERROR_OK;
119 uint32_t old_nr_cb;
120
121 if (!call)
122 return SIDE_ERROR_INVAL;
123 pthread_mutex_lock(&side_lock);
124 old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
125 if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
126 ret = SIDE_ERROR_INVAL;
127 goto unlock;
128 }
129 /* Reject duplicate (call, priv) tuples. */
130 if (side_tracer_callback_lookup(desc, call, priv)) {
131 ret = SIDE_ERROR_EXIST;
132 goto unlock;
133 }
134 old_cb = (struct side_callback *) desc->callbacks;
135 /* old_nr_cb + 1 (new cb) + 1 (NULL) */
136 new_cb = (struct side_callback *) calloc(old_nr_cb + 2, sizeof(struct side_callback));
137 if (!new_cb) {
138 ret = SIDE_ERROR_NOMEM;
139 goto unlock;
140 }
141 memcpy(new_cb, old_cb, old_nr_cb);
142 if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
143 new_cb[old_nr_cb].u.call_variadic = call;
144 else
145 new_cb[old_nr_cb].u.call = call;
146 new_cb[old_nr_cb].priv = priv;
147 side_rcu_assign_pointer(desc->callbacks, new_cb);
148 side_rcu_wait_grace_period(&rcu_gp);
149 if (old_nr_cb)
150 free(old_cb);
151 /* Increment concurrently with kernel setting the top bits. */
152 (void) __atomic_add_fetch(desc->enabled, 1, __ATOMIC_RELAXED);
153 unlock:
154 pthread_mutex_unlock(&side_lock);
155 return ret;
156 }
157
158 int side_tracer_callback_register(struct side_event_description *desc,
159 void (*call)(const struct side_event_description *desc,
160 const struct side_arg_vec_description *sav_desc,
161 void *priv),
162 void *priv)
163 {
164 if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
165 return SIDE_ERROR_INVAL;
166 return _side_tracer_callback_register(desc, call, priv);
167 }
168
169 int side_tracer_callback_variadic_register(struct side_event_description *desc,
170 void (*call_variadic)(const struct side_event_description *desc,
171 const struct side_arg_vec_description *sav_desc,
172 const struct side_arg_dynamic_event_struct *var_struct,
173 void *priv),
174 void *priv)
175 {
176 if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
177 return SIDE_ERROR_INVAL;
178 return _side_tracer_callback_register(desc, call_variadic, priv);
179 }
180
181 int _side_tracer_callback_unregister(struct side_event_description *desc,
182 void (*call)(), void *priv)
183 {
184 struct side_callback *old_cb, *new_cb;
185 const struct side_callback *cb_pos;
186 uint32_t pos_idx;
187 int ret = SIDE_ERROR_OK;
188 uint32_t old_nr_cb;
189
190 if (!call)
191 return SIDE_ERROR_INVAL;
192 pthread_mutex_lock(&side_lock);
193 cb_pos = side_tracer_callback_lookup(desc, call, priv);
194 if (!cb_pos) {
195 ret = SIDE_ERROR_NOENT;
196 goto unlock;
197 }
198 old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
199 old_cb = (struct side_callback *) desc->callbacks;
200 if (old_nr_cb == 1) {
201 new_cb = (struct side_callback *) &side_empty_callback;
202 } else {
203 pos_idx = cb_pos - desc->callbacks;
204 /* Remove entry at pos_idx. */
205 /* old_nr_cb - 1 (removed cb) + 1 (NULL) */
206 new_cb = (struct side_callback *) calloc(old_nr_cb, sizeof(struct side_callback));
207 if (!new_cb) {
208 ret = SIDE_ERROR_NOMEM;
209 goto unlock;
210 }
211 memcpy(new_cb, old_cb, pos_idx);
212 memcpy(&new_cb[pos_idx], &old_cb[pos_idx + 1], old_nr_cb - pos_idx - 1);
213 }
214 side_rcu_assign_pointer(desc->callbacks, new_cb);
215 side_rcu_wait_grace_period(&rcu_gp);
216 free(old_cb);
217 /* Decrement concurrently with kernel setting the top bits. */
218 (void) __atomic_add_fetch(desc->enabled, -1, __ATOMIC_RELAXED);
219 unlock:
220 pthread_mutex_unlock(&side_lock);
221 return ret;
222 }
223
224 int side_tracer_callback_unregister(struct side_event_description *desc,
225 void (*call)(const struct side_event_description *desc,
226 const struct side_arg_vec_description *sav_desc,
227 void *priv),
228 void *priv)
229 {
230 if (desc->flags & SIDE_EVENT_FLAG_VARIADIC)
231 return SIDE_ERROR_INVAL;
232 return _side_tracer_callback_unregister(desc, call, priv);
233 }
234
235 int side_tracer_callback_variadic_unregister(struct side_event_description *desc,
236 void (*call_variadic)(const struct side_event_description *desc,
237 const struct side_arg_vec_description *sav_desc,
238 const struct side_arg_dynamic_event_struct *var_struct,
239 void *priv),
240 void *priv)
241 {
242 if (!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))
243 return SIDE_ERROR_INVAL;
244 return _side_tracer_callback_unregister(desc, call_variadic, priv);
245 }
246
247 static
248 void side_init(void)
249 {
250 if (initialized)
251 return;
252 side_rcu_gp_init(&rcu_gp);
253 initialized = true;
254 }
This page took 0.04017 seconds and 5 git commands to generate.