// SPDX-License-Identifier: MIT
/*
 * Copyright 2022 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _TGIF_RCU_H
#define _TGIF_RCU_H

#include <sched.h>
#include <stdint.h>
#include <pthread.h>
#include <stdbool.h>
#include <poll.h>
#include <rseq/rseq.h>
#include <linux/futex.h>
#include <sys/time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <tgif/macros.h>

#define TGIF_CACHE_LINE_SIZE	256

struct tgif_rcu_percpu_count {
	uintptr_t begin;
	uintptr_t rseq_begin;
	uintptr_t end;
	uintptr_t rseq_end;
};

struct tgif_rcu_cpu_gp_state {
	struct tgif_rcu_percpu_count count[2];
} __attribute__((__aligned__(TGIF_CACHE_LINE_SIZE)));

struct tgif_rcu_gp_state {
	struct tgif_rcu_cpu_gp_state *percpu_state;
	int nr_cpus;
	int32_t futex;
	unsigned int period;
	pthread_mutex_t gp_lock;
};

struct tgif_rcu_read_state {
	struct tgif_rcu_percpu_count *percpu_count;
	int cpu;
};
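
/*
 * Informal overview of the structures above (inferred from the inline
 * helpers below, not normative):
 *
 * - struct tgif_rcu_percpu_count holds two flavors of reader counters:
 *   rseq_begin/rseq_end are incremented with restartable sequences on
 *   the fast path, begin/end with SEQ_CST atomics on the fallback
 *   path. Each begin or end event is counted by exactly one of the two
 *   mechanisms.
 * - struct tgif_rcu_cpu_gp_state keeps one such counter set per grace
 *   period parity (count[2]), cache-line aligned to limit false
 *   sharing between CPUs.
 * - struct tgif_rcu_gp_state gathers the per-CPU state, the current
 *   period parity, a futex on which the grace period waiter can block,
 *   and a mutex presumably serializing grace periods.
 */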

extern unsigned int tgif_rcu_rseq_membarrier_available __attribute__((visibility("hidden")));

/*
 * Raw wrapper for the futex(2) system call: glibc does not provide a
 * futex() wrapper, so invoke it through syscall(2).
 */
static inline
int futex(int32_t *uaddr, int op, int32_t val,
	const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
{
	return syscall(__NR_futex, uaddr, op, val, timeout, uaddr2, val3);
}

/*
 * Wake up tgif_rcu_wait_grace_period(). Can be called concurrently
 * from many threads.
 */
static inline
void tgif_rcu_wake_up_gp(struct tgif_rcu_gp_state *gp_state)
{
	if (tgif_unlikely(__atomic_load_n(&gp_state->futex, __ATOMIC_RELAXED) == -1)) {
		__atomic_store_n(&gp_state->futex, 0, __ATOMIC_RELAXED);
		/* TODO: handle futex return values. */
		(void) futex(&gp_state->futex, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}
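
/*
 * Note: the check against -1 above assumes a protocol where the grace
 * period waiter (tgif_rcu_wait_grace_period(), implemented out of
 * line) stores -1 into the futex before blocking in FUTEX_WAIT, so
 * that readers only issue a FUTEX_WAKE system call when a waiter may
 * actually be sleeping. This is an inference from this header alone.
 */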

static inline
void tgif_rcu_read_begin(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_state *read_state)
{
	struct tgif_rcu_percpu_count *begin_cpu_count;
	struct tgif_rcu_cpu_gp_state *cpu_gp_state;
	unsigned int period;
	int cpu;

	cpu = rseq_cpu_start();
	period = __atomic_load_n(&gp_state->period, __ATOMIC_RELAXED);
	cpu_gp_state = &gp_state->percpu_state[cpu];
	read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
	read_state->cpu = cpu;
	if (tgif_likely(tgif_rcu_rseq_membarrier_available &&
			!rseq_addv((intptr_t *)&begin_cpu_count->rseq_begin, 1, cpu))) {
		/*
		 * This compiler barrier (A) is paired with membarrier() at (C),
		 * (D), (E). It effectively upgrades this compiler barrier to a
		 * SEQ_CST fence with respect to the paired barriers.
		 *
		 * This barrier (A) ensures that the contents of the read-side
		 * critical section do not leak before the "begin" counter
		 * increment. It pairs with memory barriers (D) and (E).
		 *
		 * This barrier (A) also ensures that the "begin" increment is
		 * before the "end" increment. It pairs with memory barrier (C).
		 * It is redundant with barrier (B) for that purpose.
		 */
		rseq_barrier();
		return;
	}
	/* Fallback to atomic increment and SEQ_CST. */
	cpu = sched_getcpu();
	if (tgif_unlikely(cpu < 0))
		cpu = 0;
	read_state->cpu = cpu;
	cpu_gp_state = &gp_state->percpu_state[cpu];
	read_state->percpu_count = begin_cpu_count = &cpu_gp_state->count[period];
	(void) __atomic_add_fetch(&begin_cpu_count->begin, 1, __ATOMIC_SEQ_CST);
}

static inline
void tgif_rcu_read_end(struct tgif_rcu_gp_state *gp_state, struct tgif_rcu_read_state *read_state)
{
	struct tgif_rcu_percpu_count *begin_cpu_count = read_state->percpu_count;
	int cpu = read_state->cpu;

	/*
	 * This compiler barrier (B) is paired with membarrier() at (C),
	 * (D), (E). It effectively upgrades this compiler barrier to a
	 * SEQ_CST fence with respect to the paired barriers.
	 *
	 * This barrier (B) ensures that the contents of the read-side
	 * critical section do not leak after the "end" counter
	 * increment. It pairs with memory barriers (D) and (E).
	 *
	 * This barrier (B) also ensures that the "begin" increment is
	 * before the "end" increment. It pairs with memory barrier (C).
	 * It is redundant with barrier (A) for that purpose.
	 */
	rseq_barrier();
	if (tgif_likely(tgif_rcu_rseq_membarrier_available &&
			!rseq_addv((intptr_t *)&begin_cpu_count->rseq_end, 1, cpu))) {
		/*
		 * This barrier (F) is paired with membarrier()
		 * at (G). It orders increment of the begin/end
		 * counters before load/store to the futex.
		 */
		rseq_barrier();
		goto end;
	}
	/* Fallback to atomic increment and SEQ_CST. */
	(void) __atomic_add_fetch(&begin_cpu_count->end, 1, __ATOMIC_SEQ_CST);
	/*
	 * This barrier (F) is paired with SEQ_CST barrier or
	 * membarrier() at (G). It orders increment of the begin/end
	 * counters before load/store to the futex.
	 */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
end:
	tgif_rcu_wake_up_gp(gp_state);
}

#define tgif_rcu_dereference(p) \
	__extension__ \
	({ \
		__typeof__(p) _____tgif_v = __atomic_load_n(&(p), __ATOMIC_CONSUME); \
		(_____tgif_v); \
	})

#define tgif_rcu_assign_pointer(p, v)	__atomic_store_n(&(p), v, __ATOMIC_RELEASE)

void tgif_rcu_wait_grace_period(struct tgif_rcu_gp_state *gp_state) __attribute__((visibility("hidden")));
void tgif_rcu_gp_init(struct tgif_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
void tgif_rcu_gp_exit(struct tgif_rcu_gp_state *rcu_gp) __attribute__((visibility("hidden")));
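
/*
 * Usage sketch (illustrative only: "struct mydata", "global_ptr",
 * "gp_state", use() and the free()-based reclamation are hypothetical
 * and not part of this API). gp_state is assumed to have been set up
 * with tgif_rcu_gp_init() and is torn down with tgif_rcu_gp_exit().
 *
 * Reader:
 *
 *	struct tgif_rcu_read_state read_state;
 *	struct mydata *p;
 *
 *	tgif_rcu_read_begin(&gp_state, &read_state);
 *	p = tgif_rcu_dereference(global_ptr);
 *	if (p)
 *		use(p);
 *	tgif_rcu_read_end(&gp_state, &read_state);
 *
 * Updater:
 *
 *	struct mydata *old = global_ptr;
 *
 *	tgif_rcu_assign_pointer(global_ptr, new);
 *	tgif_rcu_wait_grace_period(&gp_state);
 *	free(old);
 *
 * After tgif_rcu_wait_grace_period() returns, no read-side critical
 * section can still observe the old object, so it can be reclaimed.
 */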

#endif /* _TGIF_RCU_H */