- struct rcu_ht_node *node, *old_head, *new_head;
- struct rcu_table *t;
- unsigned long hash;
- int ret = 0;
-
- new_head = calloc(1, sizeof(struct rcu_ht_node));
- new_head->key = key;
- new_head->data = data;
- new_head->flags = 0;
- /* here comes the fun and tricky part.
- * Add at the beginning with a cmpxchg.
- * Hold a read lock between the moment the first element is read
- * and the nodes traversal (to find duplicates). This ensures
- * the head pointer has not been reclaimed when cmpxchg is done.
- * Always adding at the head ensures that we would have to
- * re-try if a new item has been added concurrently. So we ensure that
- * we never add duplicates. */
-retry:
- rcu_read_lock();
-
- if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
- rcu_read_unlock();
- /*
- * Wait for resize to complete before continuing.
- */
- ret = pthread_mutex_lock(&ht->resize_mutex);
- assert(!ret);
- ret = pthread_mutex_unlock(&ht->resize_mutex);
- assert(!ret);
- goto retry;
+ return ((uint64_t) bit_reverse_u8(v) << 56) |
+ ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
+ ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
+ ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
+ ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
+ ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
+ ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
+ ((uint64_t) bit_reverse_u8(v >> 56));
+}
+
/*
 * Reverse the bit order of an unsigned long, dispatching at compile time
 * on the machine word size (CAA_BITS_PER_LONG from liburcu).
 */
static
unsigned long bit_reverse_ulong(unsigned long value)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(value);
#else
	return bit_reverse_u64(value);
#endif
}
+
/*
 * Strip the low-order "removed" flag bit from a node pointer, yielding
 * an address that is safe to dereference.
 */
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	unsigned long bits = (unsigned long) node;

	return (struct rcu_ht_node *) (bits & ~1UL);
}
+
/*
 * Test whether a next-pointer carries the logical-removal mark
 * (low-order bit set). Returns non-zero when marked.
 */
static
int is_removed(struct rcu_ht_node *node)
{
	unsigned long bits = (unsigned long) node;

	return (int) (bits & 1UL);
}
+
/*
 * Set the low-order bit in a node pointer, marking the node as
 * logically removed. The result must be cleared with clear_flag()
 * before dereferencing.
 */
static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	unsigned long bits = (unsigned long) node;

	return (struct rcu_ht_node *) (bits | 1UL);
}
+
/*
 * Atomically raise *ptr to at least v using a compare-and-swap loop.
 * Returns once *ptr >= v holds (either it already did, or our CAS
 * installed v). Lock-free; concurrent callers may each retry.
 */
static
void _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long cur = uatomic_read(ptr);

	for (;;) {
		unsigned long prev;

		/* Someone already stored a value >= v: nothing to do. */
		if (cur >= v)
			return;
		prev = uatomic_cmpxchg(ptr, cur, v);
		if (prev == cur)
			return;	/* our CAS won */
		/* Lost the race: retry against the freshly observed value. */
		cur = prev;
	}
}
+
+static
+int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
+{
+ struct rcu_ht_node *iter_prev = NULL, *iter = NULL;
+
+ for (;;) {
+ unsigned long chain_len = 0;
+
+ iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+ assert(iter_prev);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
+ for (;;) {
+ iter = clear_flag(rcu_dereference(iter_prev->next));
+ if (unlikely(!iter))
+ break;
+ if (iter->reverse_hash < node->reverse_hash)
+ break;
+ iter_prev = iter;
+ check_resize(ht, ++chain_len);
+ }
+ /* add in iter_prev->next */
+ if (is_removed(iter))
+ continue;
+ node->next = iter;
+ if (uatomic_cmpxchg(&iter_prev->next, iter, node) != iter)
+ continue;