Init RCU, add empty side callback
[libside.git] / src / side.c
index 72424ee59d46ee246ae69ce14222e8651dd77107..0715c74eab1ba01d39cc03841b013beabdf2aff4 100644
@@ -6,16 +6,15 @@
 #include <side/trace.h>
 #include "tracer.h"
 #include "rcu.h"
-#include "smp.h"
 
+/* Top 8 bits reserved for kernel tracer use. */
+#define SIDE_EVENT_ENABLED_KERNEL_MASK                 0xFF000000
 #define SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK      0x80000000
 
-struct side_rcu_gp_state rcu_gp = {
-       .percpu_state = NULL,
-       .nr_cpus = 0,
-       .period = 0,
-       .gp_lock = PTHREAD_MUTEX_INITIALIZER,
-};
+/* Allow 2^24 tracers to be registered on an event. */
+#define SIDE_EVENT_ENABLED_USER_MASK                   0x00FFFFFF
+
+struct side_rcu_gp_state rcu_gp;
 
 /*
  * Lazy initialization for early use within library constructors.
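
As the new comments state, the 32-bit enabled word is split: the top 8 bits are reserved for the kernel tracer (bit 31 flags an attached kernel user_event) and the low 24 bits count user-space tracer registrations, hence up to 2^24 tracers per event. A minimal sketch of what an enable path could look like under that layout; the helper name and the atomic used are assumptions, not part of this patch:

    #include <stdint.h>
    #include <stdlib.h>

    #define SIDE_EVENT_ENABLED_USER_MASK    0x00FFFFFF

    /* Hypothetical helper: account for one more user-space tracer on an event. */
    static void event_user_enable(uint32_t *enabled)
    {
            /* The low 24 bits are a registration count; refuse to overflow them. */
            if ((*enabled & SIDE_EVENT_ENABLED_USER_MASK) == SIDE_EVENT_ENABLED_USER_MASK)
                    abort();
            (void)__atomic_add_fetch(enabled, 1, __ATOMIC_RELAXED);
    }
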
@@ -26,8 +25,13 @@ static
 void side_init(void)
        __attribute__((constructor));
 
+const struct side_callback side_empty_callback;
+
 void side_call(const struct side_event_description *desc, const struct side_arg_vec_description *sav_desc)
 {
+       const struct side_callback *side_cb;
+       unsigned int rcu_period;
+
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(desc->flags & SIDE_EVENT_FLAG_VARIADIC)) {
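
side_empty_callback has static storage duration, so it is zero-initialized and its call pointers are NULL; an event whose callbacks pointer starts out aimed at it makes the iteration loops below terminate immediately. A hedged sketch of the callback layout this implies, with the field names taken from the accessors used in this patch and everything else assumed:

    struct side_callback {
            union {
                    void (*call)(const struct side_event_description *desc,
                            const struct side_arg_vec_description *sav_desc,
                            void *priv);
                    void (*call_variadic)(const struct side_event_description *desc,
                            const struct side_arg_vec_description *sav_desc,
                            const struct side_arg_dynamic_event_struct *var_struct,
                            void *priv);
            } u;
            void *priv;
    };

    /*
     * Zero-initialized sentinel: an entry with a NULL call pointer terminates
     * the callback array walked by side_call() and side_call_variadic().
     */
    const struct side_callback side_empty_callback;
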
@@ -37,29 +41,46 @@ void side_call(const struct side_event_description *desc, const struct side_arg_
        if (side_unlikely(*desc->enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
                // TODO: call kernel write.
        }
+       if (side_unlikely(!(*desc->enabled & SIDE_EVENT_ENABLED_USER_MASK)))
+               return;
+
        //TODO: replace tracer_call by rcu iteration on list of registered callbacks
        tracer_call(desc, sav_desc, NULL);
+
+       rcu_period = side_rcu_read_begin(&rcu_gp);
+       for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call != NULL; side_cb++)
+               side_cb->u.call(desc, sav_desc, side_cb->priv);
+       side_rcu_read_end(&rcu_gp, rcu_period);
 }
 
 void side_call_variadic(const struct side_event_description *desc,
        const struct side_arg_vec_description *sav_desc,
        const struct side_arg_dynamic_event_struct *var_struct)
 {
+       const struct side_callback *side_cb;
+       unsigned int rcu_period;
+
        if (side_unlikely(!initialized))
                side_init();
        if (side_unlikely(*desc->enabled & SIDE_EVENT_ENABLED_KERNEL_USER_EVENT_MASK)) {
                // TODO: call kernel write.
        }
+       if (side_unlikely(!(*desc->enabled & SIDE_EVENT_ENABLED_USER_MASK)))
+               return;
+
        //TODO: replace tracer_call by rcu iteration on list of registered callbacks
        tracer_call_variadic(desc, sav_desc, var_struct, NULL);
+
+       rcu_period = side_rcu_read_begin(&rcu_gp);
+       for (side_cb = side_rcu_dereference(desc->callbacks); side_cb->u.call_variadic != NULL; side_cb++)
+               side_cb->u.call_variadic(desc, sav_desc, var_struct, side_cb->priv);
+       side_rcu_read_end(&rcu_gp, rcu_period);
 }
 
 void side_init(void)
 {
        if (initialized)
                return;
-       rcu_gp.nr_cpus = get_possible_cpus_array_len();
-       if (!rcu_gp.nr_cpus)
-               abort();
+       side_rcu_gp_init(&rcu_gp);
        initialized = true;
 }
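
The read side added above is only half of the RCU protocol: side_rcu_read_begin() records which grace period the reader entered, side_rcu_dereference() loads the callback array with the required ordering, and side_rcu_read_end() lets a writer know the reader has left. A rough sketch of the matching update side, assuming rcu.h also provides side_rcu_assign_pointer() and side_rcu_wait_grace_period() counterparts to the read-side helpers (names not confirmed by this patch):

    /* Illustrative updater: publish a new callback array, then reclaim the old one. */
    static void replace_callbacks(struct side_event_description *desc,
            const struct side_callback *new_cb)
    {
            const struct side_callback *old_cb = desc->callbacks;

            /* Make the new array visible to concurrent side_call() readers. */
            side_rcu_assign_pointer(desc->callbacks, new_cb);
            /* Wait until no reader can still be iterating over old_cb. */
            side_rcu_wait_grace_period(&rcu_gp);
            /* old_cb may now be reclaimed (e.g. free()) if heap-allocated. */
            (void)old_cb;
    }

Note also that side_init() now delegates per-CPU grace-period state setup to side_rcu_gp_init() instead of open-coding the possible-CPU count lookup, which is why the smp.h include and the static rcu_gp initializer could be dropped.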