/*
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */

/*
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Expandable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical sections allow readers to perform hash
 *   table lookups and use the returned objects safely, by delaying
 *   memory reclamation by a grace period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key exists in the table.
 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of dummy nodes is kept. These dummy nodes are the hash
 *   table "buckets", and they are also chained together in the
 *   split-ordered list, which allows recursive expansion.
 * - The resize operation only allows expanding the hash table.
 *   It is triggered either through an API call or automatically by
 *   detecting long chains in the add operation.
 * - A resize operation initiated by long chain detection is executed by
 *   a call_rcu thread, which preserves lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with the "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage collection or a concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with the "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with the "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread that successfully
 *   set the "removed" flag (with a cmpxchg) in a node's next pointer
 *   is considered to have succeeded in its removal (and thus owns the
 *   node to reclaim). Because we garbage-collect starting from an
 *   invariant node (the start-of-bucket dummy node) up to the
 *   "removed" node (or find a reverse-hash that is higher), we are sure
 *   that a successful traversal of the chain leads to a chain that is
 *   present in the linked-list (the start node is never removed) and
 *   that it does not contain the "removed" node anymore, even if
 *   concurrent delete/add operations are changing the structure of the
 *   list concurrently.
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with the "removed" flag set in the bucket where it
 *   wants to add its new node. This keeps the add operation lock-free
 *   by helping the remover unlink nodes from the list rather than
 *   waiting for it to do so.
 * - An RCU "order table" indexed by log2(hash index) is copied and
 *   expanded by the resize operation. This order table allows finding
 *   the "dummy node" tables.
 * - There is one dummy node table per hash index order. The size of
 *   each dummy node table is half the number of hashes contained in
 *   this order (except for order 0).
 * - call_rcu is used to garbage-collect the old order table.
 * - The per-order dummy node tables contain a compact version of the
 *   hash table nodes. These tables are invariant after they are
 *   populated into the hash table.
 */
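
/*
 * Illustrative usage sketch (not part of this file's definitions): the
 * embedding struct mynode and the helpers mynode_hash() and
 * mynode_compare() are hypothetical caller-provided code, the call_rcu
 * pointer comes from whichever urcu flavour the application links
 * against, and cds_lfht_node_init() is assumed from urcu/rculfhash.h:
 *
 *	struct mynode {
 *		int value;
 *		struct cds_lfht_node node;	// chaining in hash table
 *	};
 *
 *	struct cds_lfht *ht;
 *	struct cds_lfht_node *found;
 *	struct mynode *n;
 *
 *	ht = cds_lfht_new(mynode_hash, mynode_compare, 0x42UL,
 *			  1, call_rcu);
 *	n = malloc(sizeof(*n));
 *	n->value = 42;
 *	cds_lfht_node_init(&n->node, &n->value, sizeof(n->value));
 *
 *	rcu_read_lock();	// add/lookup require a read-side C.S.
 *	cds_lfht_add(ht, &n->node);
 *	found = cds_lfht_lookup(ht, &n->value, sizeof(n->value));
 *	rcu_read_unlock();
 */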

#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <poll.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#ifdef DEBUG
#define dbg_printf(fmt, args...)	printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif

#define CHAIN_LEN_TARGET		4
#define CHAIN_LEN_RESIZE_THRESHOLD	8

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

/*
 * The removed flag needs to be updated atomically with the pointer.
 * The dummy flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
#define REMOVED_FLAG		(1UL << 0)
#define DUMMY_FLAG		(1UL << 1)
#define FLAGS_MASK		((1UL << 2) - 1)
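
/*
 * A quick sanity example of the pointer tagging used throughout this
 * file, assuming nodes are at least 4-byte aligned so the two
 * low-order bits of a next pointer are free (the helpers flag_removed(),
 * is_removed() and clear_flag() are defined later in this file):
 *
 *	struct cds_lfht_node *p = some_node_address;	// low bits 00
 *	p = flag_removed(p);	// low bits 01: logically deleted
 *	is_removed(p);		// non-zero
 *	clear_flag(p);		// recovers the original address
 */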

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct _cds_lfht_node *tbl[0];
};

struct cds_lfht {
	struct rcu_table *t;		/* shared */
	cds_lfht_hash_fct hash_fct;
	cds_lfht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize, in_progress_destroy;
	void (*cds_lfht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct cds_lfht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}
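
/*
 * Worked example of why reversed-bit ordering suits split-ordering:
 * with 8-bit values,
 *
 *	bit_reverse_u8(0) == 0x00
 *	bit_reverse_u8(2) == 0x40
 *	bit_reverse_u8(1) == 0x80
 *	bit_reverse_u8(3) == 0xc0
 *
 * so a list sorted by reversed hash orders buckets 0, 2, 1, 3: when
 * the table grows from 2 to 4 buckets, the new dummy nodes for
 * buckets 2 and 3 slot in between the existing dummy nodes without
 * moving any node already present in the list.
 */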

/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	asm("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	asm("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;

	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

static
unsigned int fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

static
int get_count_order_u32(uint32_t x)
{
	int order;

	order = fls_u32(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}

static
int get_count_order_ulong(unsigned long x)
{
	int order;

	order = fls_ulong(x) - 1;
	if (x & (x - 1))
		order++;
	return order;
}
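
/*
 * For example, get_count_order_u32(8) == 3 and
 * get_count_order_u32(9) == 4: the count order is the log2 of the
 * value, rounded up to the next integer for non-powers of two.
 */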

static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);

static
void check_resize(struct cds_lfht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len > 100)
		dbg_printf("WARNING: large chain length: %u.\n",
			   chain_len);
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		cds_lfht_resize_lazy(ht, t,
			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}

static
struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

static
int is_dummy(struct cds_lfht_node *node)
{
	return ((unsigned long) node) & DUMMY_FLAG;
}

static
struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
{
	return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
}

static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}
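
/*
 * For example, two threads racing with _uatomic_max(&target, 8) and
 * _uatomic_max(&target, 32) always leave target at 32: a cmpxchg
 * loser re-reads the updated value and retries only while its own
 * value is still larger.
 */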

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				return;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (likely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
	}
}
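
/*
 * Worked example of the dummy-node (bucket) lookup performed by the
 * add, remove and lookup paths below: with t->size == 8 and hash == 5,
 *
 *	index  = 5 & (8 - 1) == 5
 *	order  = get_count_order_ulong(5 + 1) == 3
 *	lookup = &t->tbl[3][5 & ((1UL << 2) - 1)] == &t->tbl[3][1]
 *
 * i.e. the order-3 table holds the 4 dummy nodes for hashes 4..7,
 * and hash 5 is its second entry.
 */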

static
struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
				    struct cds_lfht_node *node, int unique, int dummy)
{
	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*dummy_node;
	struct _cds_lfht_node *lookup;
	unsigned long hash, index, order;

	if (!t->size) {
		assert(dummy);
		node->p.next = flag_dummy(NULL);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		index = hash & (t->size - 1);
		order = get_count_order_ulong(index + 1);
		lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
		iter_prev = (struct cds_lfht_node *) lookup;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!clear_flag(iter)))
				goto insert;
			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (unlikely(is_removed(next)))
				goto gc_node;
			if (unique
			    && !is_dummy(next)
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
			    && !is_dummy(next))
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		if (!dummy)
			node->p.next = clear_flag(iter);
		else
			node->p.next = flag_dummy(clear_flag(iter));
		if (is_dummy(iter))
			new_node = flag_dummy(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    new_node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		if (is_dummy(iter))
			new_next = flag_dummy(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dummy_node = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy_node, node);
	return node;
}

static
int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
		struct cds_lfht_node *node)
{
	struct cds_lfht_node *dummy, *next, *old;
	struct _cds_lfht_node *lookup;
	int flagged = 0;
	unsigned long hash, index, order;

	/* logically delete the node */
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (unlikely(is_removed(next)))
			goto end;
		assert(!is_dummy(next));
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dummy = (struct cds_lfht_node *) lookup;
	_cds_lfht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}

static
void init_table(struct cds_lfht *ht, struct rcu_table *t,
		unsigned long first_order, unsigned long len_order)
{
	unsigned long i, end_order;

	dbg_printf("init table: first_order %lu end_order %lu\n",
		   first_order, first_order + len_order);
	end_order = first_order + len_order;
	t->size = !first_order ? 0 : (1UL << (first_order - 1));
	for (i = first_order; i < end_order; i++) {
		unsigned long j, len;

		len = !i ? 1 : 1UL << (i - 1);
		dbg_printf("init order %lu len: %lu\n", i, len);
		t->tbl[i] = calloc(len, sizeof(struct _cds_lfht_node));
		for (j = 0; j < len; j++) {
			dbg_printf("init entry: i %lu j %lu hash %lu\n",
				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
			struct cds_lfht_node *new_node =
				(struct cds_lfht_node *) &t->tbl[i][j];
			new_node->p.reverse_hash =
				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
			(void) _cds_lfht_add(ht, t, new_node, 0, 1);
			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
				break;
		}
		/* Update table size */
		t->size = !i ? 1 : (1UL << i);
		dbg_printf("init new size: %lu\n", t->size);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
	t->resize_target = t->size;
	t->resize_initiated = 0;
}
*cds_lfht_new(cds_lfht_hash_fct hash_fct
,
615 cds_lfht_compare_fct compare_fct
,
616 unsigned long hash_seed
,
617 unsigned long init_size
,
618 void (*cds_lfht_call_rcu
)(struct rcu_head
*head
,
619 void (*func
)(struct rcu_head
*head
)))
624 /* init_size must be power of two */
625 if (init_size
& (init_size
- 1))
627 ht
= calloc(1, sizeof(struct cds_lfht
));
628 ht
->hash_fct
= hash_fct
;
629 ht
->compare_fct
= compare_fct
;
630 ht
->hash_seed
= hash_seed
;
631 ht
->cds_lfht_call_rcu
= cds_lfht_call_rcu
;
632 ht
->in_progress_resize
= 0;
633 /* this mutex should not nest in read-side C.S. */
634 pthread_mutex_init(&ht
->resize_mutex
, NULL
);
635 order
= get_count_order_ulong(max(init_size
, 1)) + 1;
636 ht
->t
= calloc(1, sizeof(struct cds_lfht
)
637 + (order
* sizeof(struct _cds_lfht_node
*)));
639 pthread_mutex_lock(&ht
->resize_mutex
);
640 init_table(ht
, ht
->t
, 0, order
);
641 pthread_mutex_unlock(&ht
->resize_mutex
);

struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long hash, reverse_hash, index, order;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	index = hash & (t->size - 1);
	order = get_count_order_ulong(index + 1);
	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
		   hash, index, order, index & ((1UL << (order - 1)) - 1));
	node = (struct cds_lfht_node *) lookup;
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
				break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}

struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	struct cds_lfht_node *next;
	unsigned long reverse_hash;
	void *key;
	size_t key_len;

	reverse_hash = node->p.reverse_hash;
	key = node->key;
	key_len = node->key_len;
	next = rcu_dereference(node->p.next);
	node = clear_flag(next);

	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		next = rcu_dereference(node->p.next);
		if (likely(!is_removed(next))
		    && !is_dummy(next)
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
				break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
	return node;
}
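
/*
 * Illustrative iteration over all nodes matching a key, combining
 * cds_lfht_lookup() and cds_lfht_next() within one read-side critical
 * section (key and key_len are hypothetical caller variables):
 *
 *	struct cds_lfht_node *node;
 *
 *	rcu_read_lock();
 *	for (node = cds_lfht_lookup(ht, key, key_len);
 *	     node;
 *	     node = cds_lfht_next(ht, node)) {
 *		// ... use the matching node ...
 *	}
 *	rcu_read_unlock();
 */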

void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _cds_lfht_add(ht, t, node, 0, 0);
}

struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
				struct cds_lfht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _cds_lfht_add(ht, t, node, 1, 0);
}

int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _cds_lfht_remove(ht, t, node);
}

static
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
	struct rcu_table *t;
	struct cds_lfht_node *node;
	struct _cds_lfht_node *lookup;
	unsigned long order, i;

	t = ht->t;
	/* Check that the table is empty */
	lookup = &t->tbl[0][0];
	node = (struct cds_lfht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (clear_flag(node));
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("delete order %lu i %lu hash %lu\n",
				order, i,
				bit_reverse_ulong(t->tbl[order][i].reverse_hash));
			assert(is_dummy(t->tbl[order][i].next));
		}
		free(t->tbl[order]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int cds_lfht_destroy(struct cds_lfht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

void cds_lfht_count_nodes(struct cds_lfht *ht,
		unsigned long *count,
		unsigned long *removed)
{
	struct rcu_table *t;
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long nr_dummy = 0;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes in the table */
	lookup = &t->tbl[0][0];
	node = (struct cds_lfht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!is_dummy(next));
			(*removed)++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			(nr_dummy)++;
		node = clear_flag(next);
	} while (node);
	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
}

static
void cds_lfht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size, old_order, new_order;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;
	old_order = get_count_order_ulong(old_size) + 1;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	if (old_size == new_size)
		return;
	new_order = get_count_order_ulong(new_size) + 1;
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_order * sizeof(struct _cds_lfht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_order * sizeof(struct _cds_lfht_node *));
	init_table(ht, new_t, old_order, new_order - old_order);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void cds_lfht_resize(struct cds_lfht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	if (growth < 0) {
		/*
		 * Silently refuse to shrink hash table. (not supported)
		 */
		dbg_printf("shrinking hash table not supported.\n");
		return;
	}

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_cds_lfht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}
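
/*
 * For example, cds_lfht_resize(ht, 2) requests that the table grow to
 * four times its current size (t->size << 2) and performs the resize
 * synchronously under the resize mutex, while concurrent add, remove
 * and lookup operations keep running.
 */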

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}