/* List iteration, safe against node reclaim while iterating. */
#define side_list_for_each_entry_safe(_entry, _next_entry, _head, _member) \
- for ((_entry) = side_container_of((_head)->node.next, __typeof__(*(_entry)), _member), (_next_entry) = (_entry)->next; \
- &(_entry)->member != &head->node; \
- (_entry) = (_next_entry)->next, (_next_entry) = (_entry)->next)
+ for ((_entry) = side_container_of((_head)->node.next, __typeof__(*(_entry)), _member), \
+ (_next_entry) = side_container_of((_entry)->_member.next, __typeof__(*(_entry)), _member); \
+ &(_entry)->_member != &(_head)->node; \
+ (_entry) = (_next_entry), \
+ (_next_entry) = side_container_of((_entry)->_member.next, __typeof__(*(_entry)), _member))
#endif /* _SIDE_LIST_H */
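/*
 * Illustrative sketch, not part of this patch: the "safe" iterator samples
 * the next element before the loop body runs, so the current entry may be
 * removed and freed from within the body. The my_item/drain_list names are
 * hypothetical; struct side_list_head is assumed from DEFINE_SIDE_LIST_HEAD.
 */
struct my_item {
	struct side_list_node node;
	int value;
};

static void drain_list(struct side_list_head *head)
{
	struct my_item *item, *tmp;

	side_list_for_each_entry_safe(item, tmp, head, node) {
		side_list_remove_node(&item->node);
		free(item);	/* safe: tmp was read before item was freed */
	}
}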
/* Allow 2^24 tracer callbacks to be registered on an event. */
#define SIDE_EVENT_ENABLED_USER_MASK 0x00FFFFFF
+struct side_events_register_handle {
+ struct side_list_node node;
+ struct side_event_description **events;
+ uint32_t nr_events;
+};
+
static struct side_rcu_gp_state rcu_gp;
/*
* Lazy initialization for early use within library constructors.
*/
static bool initialized;
+/*
+ * Do not register/unregister any more events once the library destructor has run.
+ */
+static bool finalized;
static pthread_mutex_t side_lock = PTHREAD_MUTEX_INITIALIZER;
static DEFINE_SIDE_LIST_HEAD(side_list);
-static
-void side_init(void)
- __attribute__((constructor));
-
/*
* The empty callback has a NULL function callback pointer, which stops
* iteration on the array of callbacks immediately.
*/
const struct side_callback side_empty_callback;
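/*
 * Illustrative sketch, not part of this patch: dispatching an event walks
 * the callback array until it reaches the empty callback, whose function
 * pointer is NULL. The u.call/priv field names and the
 * side_rcu_read_begin()/side_rcu_read_end() signatures are assumptions
 * made for illustration.
 */
static void call_all_callbacks_sketch(const struct side_event_description *desc,
		const struct side_arg_vec_description *sav_desc)
{
	const struct side_callback *side_cb;
	unsigned int rcu_period;

	rcu_period = side_rcu_read_begin(&rcu_gp);
	for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
		side_cb->u.call(desc, sav_desc, side_cb->priv);
	side_rcu_read_end(&rcu_gp, rcu_period);
}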
+void side_init(void) __attribute__((constructor));
+void side_exit(void) __attribute__((destructor));
+
void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
{
const struct side_callback *side_cb;
unsigned int rcu_period;
uint32_t enabled;
+ if (side_unlikely(finalized))
+ return;
if (side_unlikely(!initialized))
side_init();
if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
unsigned int rcu_period;
uint32_t enabled;
+ if (side_unlikely(finalized))
+ return;
if (side_unlikely(!initialized))
side_init();
if (side_unlikely(!(desc->flags & SIDE_EVENT_FLAG_VARIADIC))) {
if (!call)
return SIDE_ERROR_INVAL;
+ if (finalized)
+ return SIDE_ERROR_EXITING;
pthread_mutex_lock(&side_lock);
old_nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
if (old_nr_cb == SIDE_EVENT_ENABLED_USER_MASK) {
if (!call)
return SIDE_ERROR_INVAL;
+ if (finalized)
+ return SIDE_ERROR_EXITING;
pthread_mutex_lock(&side_lock);
cb_pos = side_tracer_callback_lookup(desc, call, priv);
if (!cb_pos) {
return _side_tracer_callback_unregister(desc, call_variadic, priv);
}
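+/*
+ * Register an array of event descriptions. Returns an opaque handle used
+ * for unregistration, or NULL on allocation failure or after finalization.
+ */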
+struct side_events_register_handle *side_events_register(struct side_event_description **events, uint32_t nr_events)
+{
+ struct side_events_register_handle *handle = NULL;
+
+ if (finalized)
+ return NULL;
+ handle = calloc(1, sizeof(struct side_events_register_handle));
+ if (!handle)
+ return NULL;
+ pthread_mutex_lock(&side_lock);
+ handle->events = events;
+ handle->nr_events = nr_events;
+ side_list_insert_node_tail(&side_list, &handle->node);
+ pthread_mutex_unlock(&side_lock);
+ //TODO: call event batch register ioctl
+ return handle;
+}
+
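/*
 * Illustrative sketch, not part of this patch: an instrumented object would
 * typically register its event descriptor table once (e.g. from a library
 * constructor) and keep the returned handle for later unregistration. The
 * my_event_array/my_instrumentation_* names are hypothetical.
 */
static struct side_event_description *my_event_array[8];	/* hypothetical descriptors */
static struct side_events_register_handle *my_handle;

static void my_instrumentation_ctor(void)
{
	my_handle = side_events_register(my_event_array, 8);
}

static void my_instrumentation_dtor(void)
{
	if (my_handle)
		side_events_unregister(my_handle);
}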
static
+void side_event_remove_callbacks(struct side_event_description *desc)
+{
+ uint32_t nr_cb = *desc->enabled & SIDE_EVENT_ENABLED_USER_MASK;
+ struct side_callback *old_cb;
+
+ if (!nr_cb)
+ return;
+ old_cb = (struct side_callback *) desc->callbacks;
+ /*
+ * Reset the state back to 0 callbacks and the empty callback array
+ * out of caution. This should not matter because the instrumentation
+ * is unreachable at this point.
+ */
+ (void) __atomic_add_fetch(desc->enabled, -nr_cb, __ATOMIC_RELAXED);
+ side_rcu_assign_pointer(desc->callbacks, &side_empty_callback);
+ /*
+ * No need to wait for grace period because instrumentation is
+ * unreachable.
+ */
+ free(old_cb);
+}
+
+/*
+ * Unregister event handle. At this point, all side events in that
+ * handle should be unreachable.
+ */
+void side_events_unregister(struct side_events_register_handle *handle)
+{
+ uint32_t i;
+
+ if (finalized)
+ return;
+ pthread_mutex_lock(&side_lock);
+ side_list_remove_node(&handle->node);
+ for (i = 0; i < handle->nr_events; i++) {
+ struct side_event_description *event = handle->events[i];
+
+ /* Skip NULL pointers */
+ if (!event)
+ continue;
+ side_event_remove_callbacks(event);
+ }
+ pthread_mutex_unlock(&side_lock);
+ //TODO: call event batch unregister ioctl
+ free(handle);
+}
+
void side_init(void)
{
if (initialized)
	return;
side_rcu_gp_init(&rcu_gp);
initialized = true;
}
+
+void side_exit(void)
+{
+ struct side_events_register_handle *handle, *tmp;
+
+ if (finalized)
+ return;
+ side_rcu_gp_exit(&rcu_gp);
+ side_list_for_each_entry_safe(handle, tmp, &side_list, node)
+ side_events_unregister(handle);
+ finalized = true;
+}