X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=src%2Frcu.h;h=4db3500566abc002042d9f63a6b617d4c24d76de;hb=873bbf16c6bcfe2c11fca7e76dd7284c5afbee99;hp=1fe09e39162bd044f3f2162288515c48ce119302;hpb=d04d49036a407d68a89879b790c3cba6e081bea5;p=libside.git

diff --git a/src/rcu.h b/src/rcu.h
index 1fe09e3..4db3500 100644
--- a/src/rcu.h
+++ b/src/rcu.h
@@ -3,8 +3,8 @@
  * Copyright 2022 Mathieu Desnoyers
  */
 
-#ifndef _TGIF_RCU_H
-#define _TGIF_RCU_H
+#ifndef _SIDE_RCU_H
+#define _SIDE_RCU_H
 
 #include
 #include
@@ -16,35 +16,35 @@
 #include
 #include
 #include
-#include
+#include
 
-#define TGIF_CACHE_LINE_SIZE 256
+#define SIDE_CACHE_LINE_SIZE 256
 
-struct tgif_rcu_percpu_count {
+struct side_rcu_percpu_count {
 	uintptr_t begin;
 	uintptr_t rseq_begin;
 	uintptr_t end;
 	uintptr_t rseq_end;
 };
 
-struct tgif_rcu_cpu_gp_state {
-	struct tgif_rcu_percpu_count count[2];
-} __attribute__((__aligned__(TGIF_CACHE_LINE_SIZE)));
+struct side_rcu_cpu_gp_state {
+	struct side_rcu_percpu_count count[2];
+} __attribute__((__aligned__(SIDE_CACHE_LINE_SIZE)));
 
-struct tgif_rcu_gp_state {
-	struct tgif_rcu_cpu_gp_state *percpu_state;
+struct side_rcu_gp_state {
+	struct side_rcu_cpu_gp_state *percpu_state;
 	int nr_cpus;
 	int32_t futex;
 	unsigned int period;
 	pthread_mutex_t gp_lock;
 };
 
-struct tgif_rcu_read_state {
-	struct tgif_rcu_percpu_count *percpu_count;
+struct side_rcu_read_state {
+	struct side_rcu_percpu_count *percpu_count;
 	int cpu;
 };
 
-extern unsigned int tgif_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));
+extern unsigned int side_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));
 
 static inline
 int futex(int32_t *uaddr, int op, int32_t val,
@@ -54,13 +54,13 @@ int futex(int32_t *uaddr, int op, int32_t val,
 }
 
 /*
- * Wake-up tgif_rcu_wait_grace_period. Called concurrently from many
+ * Wake-up side_rcu_wait_grace_period. Called concurrently from many
  * threads.
  */
 static inline
-void tgif_rcu_wake_up_gp(struct tgif_rcu_gp_state *gp_state)
+void side_rcu_wake_up_gp(struct side_rcu_gp_state *gp_state)
 {
-	if (tgif_unlikely(__atomic_load_n(&gp_state->futex, __ATOMIC_RELAXED) == -1)) {
+	if (side_unlikely(__atomic_load_n(&gp_state->futex, __ATOMIC_RELAXED) == -1)) {
 		__atomic_store_n(&gp_state->futex, 0, __ATOMIC_RELAXED);
 		/* TODO: handle futex return values. */
 		(void) futex(&gp_state->futex, FUTEX_WAKE, 1, NULL, NULL, 0);
@@ -68,10 +68,10 @@ void tgif_rcu_wake_up_gp(struct tgif_rcu_gp_state *gp_state)
 }
 
 static inline
-void tgif_rcu_read_begin(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_state *read_state)
+void side_rcu_read_begin(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
 {
-	struct tgif_rcu_percpu_count *begin_cpu_count;
-	struct tgif_rcu_cpu_gp_state *cpu_gp_state;
+	struct side_rcu_percpu_count *begin_cpu_count;
+	struct side_rcu_cpu_gp_state *cpu_gp_state;
 	unsigned int period;
 	int cpu;
 
@@ -80,14 +80,15 @@ void tgif_rcu_read_begin(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_rea
 	cpu_gp_state = &gp_state->percpu_state[cpu];
 	read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
 	read_state->cpu = cpu;
-	if (tgif_likely(tgif_rcu_rseq_membarrier_available &&
-			!rseq_addv((intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
+	if (side_likely(side_rcu_rseq_membarrier_available &&
+			!rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
+				(intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
 		/*
 		 * This compiler barrier (A) is paired with membarrier() at (C),
 		 * (D), (E). It effectively upgrades this compiler barrier to a
 		 * SEQ_CST fence with respect to the paired barriers.
 		 *
-		 * This barrier (A) ensures that the contents of the read-tgif
+		 * This barrier (A) ensures that the contents of the read-side
 		 * critical section does not leak before the "begin" counter
 		 * increment. It pairs with memory barriers (D) and (E).
 		 *
@@ -100,7 +101,7 @@ void tgif_rcu_read_begin(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_rea
 	}
 	/* Fallback to atomic increment and SEQ_CST. */
 	cpu = sched_getcpu();
-	if (tgif_unlikely(cpu < 0))
+	if (side_unlikely(cpu < 0))
 		cpu = 0;
 	read_state->cpu = cpu;
 	cpu_gp_state = &gp_state->percpu_state[cpu];
@@ -109,9 +110,9 @@
 }
 
 static inline
-void tgif_rcu_read_end(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_state *read_state)
+void side_rcu_read_end(struct side_rcu_gp_state *gp_state, struct side_rcu_read_state *read_state)
 {
-	struct tgif_rcu_percpu_count *begin_cpu_count = read_state->percpu_count;
+	struct side_rcu_percpu_count *begin_cpu_count = read_state->percpu_count;
 	int cpu = read_state->cpu;
 
 	/*
@@ -119,7 +120,7 @@ void tgif_rcu_read_end(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_
 	 * (D), (E). It effectively upgrades this compiler barrier to a
 	 * SEQ_CST fence with respect to the paired barriers.
 	 *
-	 * This barrier (B) ensures that the contents of the read-tgif
+	 * This barrier (B) ensures that the contents of the read-side
 	 * critical section does not leak after the "end" counter
 	 * increment. It pairs with memory barriers (D) and (E).
 	 *
@@ -128,8 +129,9 @@ void tgif_rcu_read_end(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_
 	 * It is redundant with barrier (A) for that purpose.
 	 */
 	rseq_barrier();
-	if (tgif_likely(tgif_rcu_rseq_membarrier_available &&
-			!rseq_addv((intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
+	if (side_likely(side_rcu_rseq_membarrier_available &&
+			!rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
+				(intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
 		/*
 		 * This barrier (F) is paired with membarrier()
 		 * at (G). It orders increment of the begin/end
@@ -141,26 +143,25 @@ void tgif_rcu_read_end(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_
 	/* Fallback to atomic increment and SEQ_CST. */
 	(void) __atomic_add_fetch(&begin_cpu_count->end, 1, __ATOMIC_SEQ_CST);
 	/*
-	 * This barrier (F) is paired with SEQ_CST barrier or
-	 * membarrier() at (G). It orders increment of the begin/end
-	 * counters before load/store to the futex.
+	 * This barrier (F) implied by SEQ_CST is paired with SEQ_CST
+	 * barrier or membarrier() at (G). It orders increment of the
+	 * begin/end counters before load/store to the futex.
 	 */
-	__atomic_thread_fence(__ATOMIC_SEQ_CST);
 end:
-	tgif_rcu_wake_up_gp(gp_state);
+	side_rcu_wake_up_gp(gp_state);
 }
 
-#define tgif_rcu_dereference(p) \
+#define side_rcu_dereference(p) \
 	__extension__ \
 	({ \
-		__typeof__(p) _____tgif_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
-		(_____tgif_v); \
+		__typeof__(p) _____side_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
+		(_____side_v); \
 	})
 
-#define tgif_rcu_assign_pointer(p, v) __atomic_store_n(&(p), v, __ATOMIC_RELEASE); \
+#define side_rcu_assign_pointer(p, v) __atomic_store_n(&(p), v, __ATOMIC_RELEASE);
 
-void tgif_rcu_wait_grace_period(struct tgif_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
-void tgif_rcu_gp_init(struct tgif_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
-void tgif_rcu_gp_exit(struct tgif_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
+void side_rcu_wait_grace_period(struct side_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
+void side_rcu_gp_init(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
+void side_rcu_gp_exit(struct side_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
 
-#endif /* _TGIF_RCU_H */
+#endif /* _SIDE_RCU_H */
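
For readers of this diff, the sketch below shows how the renamed API fits together: a reader brackets its access with side_rcu_read_begin()/side_rcu_read_end() and loads the shared pointer through side_rcu_dereference(), while an updater publishes a new version with side_rcu_assign_pointer() and waits for a grace period before reclaiming the old one. This is an illustrative sketch only, not code from the libside tree: struct config, current_config, reader() and update_config() are made-up names, a single updater is assumed, and it is assumed to be compiled inside the library sources (the declarations above have hidden visibility), with the grace-period state already set up by side_rcu_gp_init().

/*
 * Usage sketch (illustrative, not part of the diff). Assumes it is
 * built within the libside sources, where rcu.h and its hidden
 * symbols are visible, and that side_rcu_gp_init(&rcu_gp) has run.
 */
#include <stdlib.h>
#include "rcu.h"

struct config {				/* made-up payload type */
	int verbose;
};

static struct side_rcu_gp_state rcu_gp;
static struct config *current_config;	/* RCU-protected pointer */

static int reader(void)
{
	struct side_rcu_read_state read_state;
	struct config *cfg;
	int verbose = 0;

	side_rcu_read_begin(&rcu_gp, &read_state);
	cfg = side_rcu_dereference(current_config);
	if (cfg)
		verbose = cfg->verbose;	/* dereference cfg only inside the critical section */
	side_rcu_read_end(&rcu_gp, &read_state);
	return verbose;
}

static void update_config(struct config *new_cfg)	/* single updater assumed */
{
	struct config *old_cfg = current_config;

	side_rcu_assign_pointer(current_config, new_cfg);
	/* Wait for pre-existing readers to finish before reclaiming. */
	side_rcu_wait_grace_period(&rcu_gp);
	free(old_cfg);
}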