4 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 * Based on the following articles:
26 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
27 * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
28 * - Michael, M. M. High performance dynamic lock-free hash tables
29 * and list-based sets. In Proceedings of the fourteenth annual ACM
30 * symposium on Parallel algorithms and architectures, ACM Press,
33 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * - RCU read-side critical section allows readers to perform hash
 *   table lookups and use the returned objects safely, by delaying
 *   memory reclaim for a grace period.
39 * - Add and remove operations are lock-free, and do not need to
40 * allocate memory. They need to be executed within RCU read-side
41 * critical section to ensure the objects they read are valid and to
42 * deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures that no
 *   duplicate key is inserted when it succeeds.
46 * - The resize operation executes concurrently with add/remove/lookup.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by increasing reversed-bit hash value.
49 * - An index of bucket nodes is kept. These bucket nodes are the hash
50 * table "buckets", and they are also chained together in the
51 * split-ordered list, which allows recursive expansion.
 * - The resize operation for small tables only allows expanding the hash table.
 *   It is triggered automatically by detecting long chains in the add
 *   operation.
55 * - The resize operation for larger tables (and available through an
56 * API) allows both expanding and shrinking the hash table.
57 * - Split-counters are used to keep track of the number of
58 * nodes within the hash table for automatic resize triggering.
59 * - Resize operation initiated by long chain detection is executed by a
60 * call_rcu thread, which keeps lock-freedom of add and remove.
61 * - Resize operations are protected by a mutex.
62 * - The removal operation is split in two parts: first, a "removed"
63 * flag is set in the next pointer within the node to remove. Then,
64 * a "garbage collection" is performed in the bucket containing the
65 * removed node (from the start of the bucket up to the removed node).
66 * All encountered nodes with "removed" flag set in their next
67 * pointers are removed from the linked-list. If the cmpxchg used for
68 * removal fails (due to concurrent garbage-collection or concurrent
69 * add), we retry from the beginning of the bucket. This ensures that
70 * the node with "removed" flag set is removed from the hash table
71 * (not visible to lookups anymore) before the RCU read-side critical
72 * section held across removal ends. Furthermore, this ensures that
73 * the node with "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. Only the thread that successfully
 *   set the "removed" flag (with a cmpxchg) into a node's next pointer
 *   is considered to have succeeded its removal (and thus owns the node
 *   to reclaim). Because we garbage-collect starting from an invariant
 *   node (the start-of-bucket bucket node) up to the "removed" node (or
 *   until we find a higher reverse-hash), we are sure that a successful
 *   traversal of the chain leads to a chain that is present in the
 *   linked-list (the start node is never removed) and that it does not
 *   contain the "removed" node anymore, even if concurrent delete/add
 *   operations are changing the structure of the list concurrently (see
 *   the removal sketch after this list).
 * - The add operation performs garbage collection of buckets if it
 *   encounters nodes with the removed flag set in the bucket where it
 *   wants to add its new node. This ensures lock-freedom of the add
 *   operation by helping the remover unlink nodes from the list rather
 *   than waiting for the removal to complete.
90 * - A RCU "order table" indexed by log2(hash index) is copied and
91 * expanded by the resize operation. This order table allows finding
92 * the "bucket node" tables.
93 * - There is one bucket node table per hash index order. The size of
94 * each bucket node table is half the number of hashes contained in
95 * this order (except for order 0).
 * - synchronize_rcu is used to garbage-collect the old bucket node table.
97 * - The per-order bucket node tables contain a compact version of the
98 * hash table nodes. These tables are invariant after they are
99 * populated into the hash table.
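 *
 * Sketch of the two-step removal described above (a simplified outline;
 * the real code is in _cds_lfht_del() and _cds_lfht_gc_bucket() below):
 *
 *   1) Logical removal: publish the "removed" flag with a cmpxchg loop on
 *      the victim's next pointer:
 *        do {
 *                next = old;
 *                old = uatomic_cmpxchg(&node->next, next, flag_removed(next));
 *        } while (old != next);
 *   2) Garbage collection: unlink every flagged node found between the
 *      start-of-bucket node and the removed node:
 *        bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
 *        _cds_lfht_gc_bucket(bucket, node);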
 * Bucket node tables:
 *
 * hash table   hash table      the last        all bucket node tables
 * order        size            bucket node     0   1   2   3   4   5   6(index)
 *                              table size
 * 0            1               1               1
 * 1            2               1               1   1
 * 2            4               2               1   1   2
 * 3            8               4               1   1   2   4
 * 4            16              8               1   1   2   4   8
 * 5            32              16              1   1   2   4   8  16
 * 6            64              32              1   1   2   4   8  16  32
 * When growing/shrinking, we only focus on the last bucket node table,
 * whose size is (!order ? 1 : (1 << (order - 1))).
117 * Example for growing/shrinking:
118 * grow hash table from order 5 to 6: init the index=6 bucket node table
119 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
121 * A bit of ascii art explanation:
 * The order index is off by one compared to the actual power of 2,
 * because we use index 0 to deal with the 0 special-case.
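 *
 * Illustrative mapping from a global bucket index to its per-order table
 * slot (a simplified sketch of what lookup_bucket() below computes):
 *
 *      if (index < ht->min_alloc_size)
 *              bucket = &ht->t.tbl[0]->nodes[index];
 *      else {
 *              order = fls_ulong(index);
 *              bucket = &ht->t.tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
 *      }
 *
 * e.g. index 5 (binary 101) gives order 3 and slot 5 & 3 = 1, that is
 * tbl[3]->nodes[1].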
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 4  100        001
 * 2  010        010
 * 6  110        011
 * 1  001        100
 * 5  101        101
 * 3  011        110
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 * 1               |  1  001        100             <-
 * 2               |  |  2  010        010    <-     |
 *                 |  |  |  3  011        110  | <-  |
 * 3               -> |  |  |  4  100        001  |  |
 *                    -> |  |     5  101        101  |
 *                       -> |         6  110        011
 *                          ->             7  111        111
162 #include <urcu-call-rcu.h>
163 #include <urcu/arch.h>
164 #include <urcu/uatomic.h>
165 #include <urcu/compiler.h>
166 #include <urcu/rculfhash.h>
#ifdef DEBUG
#define dbg_printf(fmt, args...)        printf("[debug rculfhash] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)
#endif
 * Split-counters lazily update the global counter each 1024
 * additions/removals, and automatically keep track of when a resize is
 * required. We use the bucket length as an expansion indicator for small
 * tables and for machines lacking per-cpu data support.
182 #define COUNT_COMMIT_ORDER 10
183 #define DEFAULT_SPLIT_COUNT_MASK 0xFUL
184 #define CHAIN_LEN_TARGET 1
185 #define CHAIN_LEN_RESIZE_THRESHOLD 3
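
/*
 * Illustrative sketch (an assumption-level example, not part of the original
 * implementation): the two triggers that drive automatic resize. A per-slot
 * split counter commits to the global counter only once every
 * (1UL << COUNT_COMMIT_ORDER) operations, and a bucket chain longer than
 * CHAIN_LEN_RESIZE_THRESHOLD requests an expansion.
 */
static inline
int example_commit_due(unsigned long split_count)
{
        /* true once every 1024 add/del operations on a given counter slot */
        return !(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1));
}

static inline
int example_chain_too_long(unsigned int chain_len)
{
        /* a chain of CHAIN_LEN_RESIZE_THRESHOLD nodes or more asks for growth */
        return chain_len >= CHAIN_LEN_RESIZE_THRESHOLD;
}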
188 * Define the minimum table size.
190 #define MIN_TABLE_SIZE 1
#if (CAA_BITS_PER_LONG == 32)
#define MAX_TABLE_ORDER                 32
#else
#define MAX_TABLE_ORDER                 64
#endif
199 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
201 #define MIN_PARTITION_PER_THREAD_ORDER 12
202 #define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
205 #define min(a, b) ((a) < (b) ? (a) : (b))
209 #define max(a, b) ((a) > (b) ? (a) : (b))
213 * The removed flag needs to be updated atomically with the pointer.
214 * It indicates that no node must attach to the node scheduled for
215 * removal, and that node garbage collection must be performed.
 * The bucket flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
219 #define REMOVED_FLAG (1UL << 0)
220 #define BUCKET_FLAG (1UL << 1)
221 #define FLAGS_MASK ((1UL << 2) - 1)
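
/*
 * Illustrative sketch (assumption, mirroring the flag_removed()/is_removed()/
 * clear_flag() helpers defined below): both flags live in the two low-order
 * bits of the next pointer, which are always zero for properly aligned nodes.
 */
static inline
struct cds_lfht_node *example_strip_flags(struct cds_lfht_node *next,
                int *removed)
{
        /* report whether the REMOVED flag is set, then strip both flag bits */
        *removed = !!(((unsigned long) next) & REMOVED_FLAG);
        return (struct cds_lfht_node *) (((unsigned long) next) & ~FLAGS_MASK);
}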
223 /* Value of the end pointer. Should not interact with flags. */
224 #define END_VALUE NULL
/*
 * ht_items_count: Split-counters counting the number of node additions
 * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
 * is set at hash table creation.
 *
 * These are free-running counters, never reset to zero. They count the
 * number of add/remove operations, and every (1 << COUNT_COMMIT_ORDER)
 * operations they trigger an update of the global counter. We choose a
 * power-of-2 value for the trigger to deal with 32- or 64-bit overflow
 * of the counter.
 */
struct ht_items_count {
        unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/*
 * rcu_level: Contains the per order-index-level bucket node table. The
 * size of each bucket node table is half the number of hashes contained
 * in this order (except for order 0). The minimum allocation size
 * parameter allows combining the bucket node arrays of the lowermost
 * levels to improve cache locality for small index orders.
 */
struct rcu_level {
        /* Note: manually update allocation length when adding a field */
        struct cds_lfht_node nodes[0];
};
/*
 * rcu_table: Contains the size and desired new size if a resize
 * operation is in progress, as well as the statically-sized array of
 * rcu_level pointers.
 */
struct rcu_table {
        unsigned long size;     /* always a power of 2, shared (RCU) */
        unsigned long resize_target;
        int resize_initiated;
        struct rcu_level *tbl[MAX_TABLE_ORDER];
};
/*
 * cds_lfht: Top-level data structure representing a lock-free hash
 * table. Defined in the implementation file to make it an opaque
 * structure to users.
 */
struct cds_lfht {
        struct rcu_table t;
        unsigned long min_alloc_order;
        unsigned long min_alloc_size;
        int flags;
        /*
         * We need to put the work threads offline (QSBR) when taking this
         * mutex, because we use synchronize_rcu within this mutex critical
         * section, which waits on read-side critical sections, and could
         * therefore cause grace-period deadlock if we hold off RCU G.P.
         */
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
        unsigned int in_progress_resize, in_progress_destroy;
        void (*cds_lfht_call_rcu)(struct rcu_head *head,
                        void (*func)(struct rcu_head *head));
        void (*cds_lfht_synchronize_rcu)(void);
        void (*cds_lfht_rcu_read_lock)(void);
        void (*cds_lfht_rcu_read_unlock)(void);
        void (*cds_lfht_rcu_thread_offline)(void);
        void (*cds_lfht_rcu_thread_online)(void);
        void (*cds_lfht_rcu_register_thread)(void);
        void (*cds_lfht_rcu_unregister_thread)(void);
        pthread_attr_t *resize_attr;    /* Resize threads attributes */
        long count;                     /* global approximate item count */
        struct ht_items_count *split_count;     /* split item count */
};
/*
 * rcu_resize_work: Contains arguments passed to RCU worker thread
 * responsible for performing lazy resize.
 */
struct rcu_resize_work {
        struct rcu_head head;
        struct cds_lfht *ht;
};
/*
 * partition_resize_work: Contains arguments passed to worker threads
 * executing the hash table resize on partitions of the hash table
 * assigned to each processor's worker thread.
 */
struct partition_resize_work {
        pthread_t thread_id;
        struct cds_lfht *ht;
        unsigned long i, start, len;
        void (*fct)(struct cds_lfht *ht, unsigned long i,
                    unsigned long start, unsigned long len);
};
static
void _cds_lfht_add(struct cds_lfht *ht,
                cds_lfht_match_fct match,
                const void *key,
                unsigned long size,
                struct cds_lfht_node *node,
                struct cds_lfht_iter *unique_ret,
                int bucket_flag);
/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * See http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * (Originally in the public domain.)
 */
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
        R6(0), R6(2), R6(1), R6(3)
};
static
uint8_t bit_reverse_u8(uint8_t v)
{
        return BitReverseTable256[v];
}
static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
        return ((uint32_t) bit_reverse_u8(v) << 24) |
                ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
                ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
                ((uint32_t) bit_reverse_u8(v >> 24));
}
static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
        return ((uint64_t) bit_reverse_u8(v) << 56) |
                ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
                ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
                ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
                ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
                ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
                ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
                ((uint64_t) bit_reverse_u8(v >> 56));
}
static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
        return bit_reverse_u32(v);
#else
        return bit_reverse_u64(v);
#endif
}
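
/*
 * Illustrative note (assumption): a node's sort key in the split-ordered
 * list is simply its bit-reversed hash, so when the table doubles in size
 * each chain splits in place without moving any node.
 */
static inline
unsigned long example_split_order_key(unsigned long hash)
{
        return bit_reverse_ulong(hash);
}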
386 * fls: returns the position of the most significant bit.
387 * Returns 0 if no bit is set, else returns the position of the most
388 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
390 #if defined(__i386) || defined(__x86_64)
392 unsigned int fls_u32(uint32_t x
)
400 : "=r" (r
) : "rm" (x
));
406 #if defined(__x86_64)
408 unsigned int fls_u64(uint64_t x
)
416 : "=r" (r
) : "rm" (x
));
423 static __attribute__((unused
))
424 unsigned int fls_u64(uint64_t x
)
431 if (!(x
& 0xFFFFFFFF00000000ULL
)) {
435 if (!(x
& 0xFFFF000000000000ULL
)) {
439 if (!(x
& 0xFF00000000000000ULL
)) {
443 if (!(x
& 0xF000000000000000ULL
)) {
447 if (!(x
& 0xC000000000000000ULL
)) {
451 if (!(x
& 0x8000000000000000ULL
)) {
460 static __attribute__((unused
))
461 unsigned int fls_u32(uint32_t x
)
467 if (!(x
& 0xFFFF0000U
)) {
471 if (!(x
& 0xFF000000U
)) {
475 if (!(x
& 0xF0000000U
)) {
479 if (!(x
& 0xC0000000U
)) {
483 if (!(x
& 0x80000000U
)) {
491 unsigned int fls_ulong(unsigned long x
)
493 #if (CAA_BITS_PER_LONG == 32)
/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int get_count_order_u32(uint32_t x)
{
        if (!x)
                return -1;

        return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int get_count_order_ulong(unsigned long x)
{
        if (!x)
                return -1;

        return fls_ulong(x - 1);
}
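
/*
 * Illustrative sketch (assumption): rounding a requested size up to the next
 * power of two with get_count_order_ulong(), as the resize path does,
 * e.g. 9 -> 16, 16 -> 16, 1 -> 1.
 */
static inline
unsigned long example_round_up_pow2(unsigned long x)
{
        return 1UL << get_count_order_ulong(x);
}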
#ifdef POISON_FREE
#define poison_free(ptr)                                        \
        do {                                                    \
                memset(ptr, 0x42, sizeof(*(ptr)));              \
                free(ptr);                                      \
        } while (0)
#else
#define poison_free(ptr)        free(ptr)
#endif
537 void cds_lfht_resize_lazy_grow(struct cds_lfht
*ht
, unsigned long size
, int growth
);
540 void cds_lfht_resize_lazy_count(struct cds_lfht
*ht
, unsigned long size
,
541 unsigned long count
);
543 static long nr_cpus_mask
= -1;
544 static long split_count_mask
= -1;
546 #if defined(HAVE_SYSCONF)
547 static void ht_init_nr_cpus_mask(void)
551 maxcpus
= sysconf(_SC_NPROCESSORS_CONF
);
557 * round up number of CPUs to next power of two, so we
558 * can use & for modulo.
560 maxcpus
= 1UL << get_count_order_ulong(maxcpus
);
561 nr_cpus_mask
= maxcpus
- 1;
563 #else /* #if defined(HAVE_SYSCONF) */
564 static void ht_init_nr_cpus_mask(void)
568 #endif /* #else #if defined(HAVE_SYSCONF) */
571 void alloc_split_items_count(struct cds_lfht
*ht
)
573 struct ht_items_count
*count
;
575 if (nr_cpus_mask
== -1) {
576 ht_init_nr_cpus_mask();
577 if (nr_cpus_mask
< 0)
578 split_count_mask
= DEFAULT_SPLIT_COUNT_MASK
;
580 split_count_mask
= nr_cpus_mask
;
583 assert(split_count_mask
>= 0);
585 if (ht
->flags
& CDS_LFHT_ACCOUNTING
) {
586 ht
->split_count
= calloc(split_count_mask
+ 1, sizeof(*count
));
587 assert(ht
->split_count
);
589 ht
->split_count
= NULL
;
594 void free_split_items_count(struct cds_lfht
*ht
)
596 poison_free(ht
->split_count
);
599 #if defined(HAVE_SCHED_GETCPU)
601 int ht_get_split_count_index(unsigned long hash
)
605 assert(split_count_mask
>= 0);
606 cpu
= sched_getcpu();
607 if (caa_unlikely(cpu
< 0))
608 return hash
& split_count_mask
;
610 return cpu
& split_count_mask
;
612 #else /* #if defined(HAVE_SCHED_GETCPU) */
614 int ht_get_split_count_index(unsigned long hash
)
616 return hash
& split_count_mask
;
618 #endif /* #else #if defined(HAVE_SCHED_GETCPU) */
621 void ht_count_add(struct cds_lfht
*ht
, unsigned long size
, unsigned long hash
)
623 unsigned long split_count
;
626 if (caa_unlikely(!ht
->split_count
))
628 index
= ht_get_split_count_index(hash
);
629 split_count
= uatomic_add_return(&ht
->split_count
[index
].add
, 1);
630 if (caa_unlikely(!(split_count
& ((1UL << COUNT_COMMIT_ORDER
) - 1)))) {
633 dbg_printf("add split count %lu\n", split_count
);
634 count
= uatomic_add_return(&ht
->count
,
635 1UL << COUNT_COMMIT_ORDER
);
637 if (!(count
& (count
- 1))) {
638 if ((count
>> CHAIN_LEN_RESIZE_THRESHOLD
) < size
)
640 dbg_printf("add set global %ld\n", count
);
641 cds_lfht_resize_lazy_count(ht
, size
,
642 count
>> (CHAIN_LEN_TARGET
- 1));
648 void ht_count_del(struct cds_lfht
*ht
, unsigned long size
, unsigned long hash
)
650 unsigned long split_count
;
653 if (caa_unlikely(!ht
->split_count
))
655 index
= ht_get_split_count_index(hash
);
656 split_count
= uatomic_add_return(&ht
->split_count
[index
].del
, 1);
657 if (caa_unlikely(!(split_count
& ((1UL << COUNT_COMMIT_ORDER
) - 1)))) {
660 dbg_printf("del split count %lu\n", split_count
);
661 count
= uatomic_add_return(&ht
->count
,
662 -(1UL << COUNT_COMMIT_ORDER
));
664 if (!(count
& (count
- 1))) {
665 if ((count
>> CHAIN_LEN_RESIZE_THRESHOLD
) >= size
)
667 dbg_printf("del set global %ld\n", count
);
                 * Don't shrink table if the number of nodes is below a
                 * certain threshold.
672 if (count
< (1UL << COUNT_COMMIT_ORDER
) * (split_count_mask
+ 1))
674 cds_lfht_resize_lazy_count(ht
, size
,
675 count
>> (CHAIN_LEN_TARGET
- 1));
681 void check_resize(struct cds_lfht
*ht
, unsigned long size
, uint32_t chain_len
)
685 if (!(ht
->flags
& CDS_LFHT_AUTO_RESIZE
))
687 count
= uatomic_read(&ht
->count
);
689 * Use bucket-local length for small table expand and for
690 * environments lacking per-cpu data support.
692 if (count
>= (1UL << COUNT_COMMIT_ORDER
))
695 dbg_printf("WARNING: large chain length: %u.\n",
697 if (chain_len
>= CHAIN_LEN_RESIZE_THRESHOLD
)
698 cds_lfht_resize_lazy_grow(ht
, size
,
699 get_count_order_u32(chain_len
- (CHAIN_LEN_TARGET
- 1)));
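        /*
         * Example: with CHAIN_LEN_TARGET == 1, a chain of length 3 requests a
         * growth of get_count_order_u32(3) == 2 orders, i.e. a 4x larger table.
         */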
703 struct cds_lfht_node
*clear_flag(struct cds_lfht_node
*node
)
705 return (struct cds_lfht_node
*) (((unsigned long) node
) & ~FLAGS_MASK
);
709 int is_removed(struct cds_lfht_node
*node
)
711 return ((unsigned long) node
) & REMOVED_FLAG
;
715 struct cds_lfht_node
*flag_removed(struct cds_lfht_node
*node
)
717 return (struct cds_lfht_node
*) (((unsigned long) node
) | REMOVED_FLAG
);
721 int is_bucket(struct cds_lfht_node
*node
)
723 return ((unsigned long) node
) & BUCKET_FLAG
;
727 struct cds_lfht_node
*flag_bucket(struct cds_lfht_node
*node
)
729 return (struct cds_lfht_node
*) (((unsigned long) node
) | BUCKET_FLAG
);
733 struct cds_lfht_node
*get_end(void)
735 return (struct cds_lfht_node
*) END_VALUE
;
739 int is_end(struct cds_lfht_node
*node
)
741 return clear_flag(node
) == (struct cds_lfht_node
*) END_VALUE
;
745 unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr
,
748 unsigned long old1
, old2
;
750 old1
= uatomic_read(ptr
);
755 } while ((old1
= uatomic_cmpxchg(ptr
, old2
, v
)) != old2
);
760 struct cds_lfht_node
*lookup_bucket(struct cds_lfht
*ht
, unsigned long size
,
763 unsigned long index
, order
;
766 index
= hash
& (size
- 1);
768 if (index
< ht
->min_alloc_size
) {
769 dbg_printf("lookup hash %lu index %lu order 0 aridx 0\n",
771 return &ht
->t
.tbl
[0]->nodes
[index
];
774 * equivalent to get_count_order_ulong(index + 1), but optimizes
775 * away the non-existing 0 special-case for
776 * get_count_order_ulong.
778 order
= fls_ulong(index
);
779 dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
780 hash
, index
, order
, index
& ((1UL << (order
- 1)) - 1));
781 return &ht
->t
.tbl
[order
]->nodes
[index
& ((1UL << (order
- 1)) - 1)];
785 * Remove all logically deleted nodes from a bucket up to a certain node key.
788 void _cds_lfht_gc_bucket(struct cds_lfht_node
*bucket
, struct cds_lfht_node
*node
)
790 struct cds_lfht_node
*iter_prev
, *iter
, *next
, *new_next
;
792 assert(!is_bucket(bucket
));
793 assert(!is_removed(bucket
));
794 assert(!is_bucket(node
));
795 assert(!is_removed(node
));
798 /* We can always skip the bucket node initially */
799 iter
= rcu_dereference(iter_prev
->next
);
800 assert(!is_removed(iter
));
801 assert(iter_prev
->reverse_hash
<= node
->reverse_hash
);
803 * We should never be called with bucket (start of chain)
804 * and logically removed node (end of path compression
805 * marker) being the actual same node. This would be a
806 * bug in the algorithm implementation.
808 assert(bucket
!= node
);
810 if (caa_unlikely(is_end(iter
)))
812 if (caa_likely(clear_flag(iter
)->reverse_hash
> node
->reverse_hash
))
814 next
= rcu_dereference(clear_flag(iter
)->next
);
815 if (caa_likely(is_removed(next
)))
817 iter_prev
= clear_flag(iter
);
820 assert(!is_removed(iter
));
822 new_next
= flag_bucket(clear_flag(next
));
824 new_next
= clear_flag(next
);
825 (void) uatomic_cmpxchg(&iter_prev
->next
, iter
, new_next
);
831 int _cds_lfht_replace(struct cds_lfht
*ht
, unsigned long size
,
832 struct cds_lfht_node
*old_node
,
833 struct cds_lfht_node
*old_next
,
834 struct cds_lfht_node
*new_node
)
836 struct cds_lfht_node
*bucket
, *ret_next
;
838 if (!old_node
) /* Return -ENOENT if asked to replace NULL node */
841 assert(!is_removed(old_node
));
842 assert(!is_bucket(old_node
));
843 assert(!is_removed(new_node
));
844 assert(!is_bucket(new_node
));
845 assert(new_node
!= old_node
);
847 /* Insert after node to be replaced */
848 if (is_removed(old_next
)) {
850 * Too late, the old node has been removed under us
851 * between lookup and replace. Fail.
855 assert(!is_bucket(old_next
));
856 assert(new_node
!= clear_flag(old_next
));
857 new_node
->next
= clear_flag(old_next
);
859 * Here is the whole trick for lock-free replace: we add
860 * the replacement node _after_ the node we want to
861 * replace by atomically setting its next pointer at the
862 * same time we set its removal flag. Given that
863 * the lookups/get next use an iterator aware of the
864 * next pointer, they will either skip the old node due
865 * to the removal flag and see the new node, or use
866 * the old node, but will not see the new one.
868 ret_next
= uatomic_cmpxchg(&old_node
->next
,
869 old_next
, flag_removed(new_node
));
870 if (ret_next
== old_next
)
871 break; /* We performed the replacement. */
876 * Ensure that the old node is not visible to readers anymore:
877 * lookup for the node, and remove it (along with any other
878 * logically removed node) if found.
880 bucket
= lookup_bucket(ht
, size
, bit_reverse_ulong(old_node
->reverse_hash
));
881 _cds_lfht_gc_bucket(bucket
, new_node
);
883 assert(is_removed(rcu_dereference(old_node
->next
)));
888 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
889 * mode. A NULL unique_ret allows creation of duplicate keys.
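 *
 * For reference, as wired up further down in this file: cds_lfht_add() calls
 * _cds_lfht_add() with a NULL unique_ret and therefore always inserts, while
 * cds_lfht_add_unique() and cds_lfht_add_replace() pass a local iterator so
 * they can detect that the key was already present (iter.node != node).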
892 void _cds_lfht_add(struct cds_lfht
*ht
,
893 cds_lfht_match_fct match
,
896 struct cds_lfht_node
*node
,
897 struct cds_lfht_iter
*unique_ret
,
900 struct cds_lfht_node
*iter_prev
, *iter
, *next
, *new_node
, *new_next
,
902 struct cds_lfht_node
*bucket
;
904 assert(!is_bucket(node
));
905 assert(!is_removed(node
));
906 bucket
= lookup_bucket(ht
, size
, bit_reverse_ulong(node
->reverse_hash
));
908 uint32_t chain_len
= 0;
                 * iter_prev points to the non-removed node prior to the
                 * insert location.
915 /* We can always skip the bucket node initially */
916 iter
= rcu_dereference(iter_prev
->next
);
917 assert(iter_prev
->reverse_hash
<= node
->reverse_hash
);
919 if (caa_unlikely(is_end(iter
)))
921 if (caa_likely(clear_flag(iter
)->reverse_hash
> node
->reverse_hash
))
924 /* bucket node is the first node of the identical-hash-value chain */
925 if (bucket_flag
&& clear_flag(iter
)->reverse_hash
== node
->reverse_hash
)
928 next
= rcu_dereference(clear_flag(iter
)->next
);
929 if (caa_unlikely(is_removed(next
)))
935 && clear_flag(iter
)->reverse_hash
== node
->reverse_hash
) {
936 struct cds_lfht_iter d_iter
= { .node
= node
, .next
= iter
, };
                                 * uniquely adding inserts the node as the first
                                 * node of the identical-hash-value node chain.
                                 *
                                 * This semantic ensures no duplicated keys
                                 * should ever be observable in the table
                                 * (including when observing the table node by
                                 * node via forward iteration).
947 cds_lfht_next_duplicate(ht
, match
, key
, &d_iter
);
951 *unique_ret
= d_iter
;
955 /* Only account for identical reverse hash once */
956 if (iter_prev
->reverse_hash
!= clear_flag(iter
)->reverse_hash
958 check_resize(ht
, size
, ++chain_len
);
959 iter_prev
= clear_flag(iter
);
964 assert(node
!= clear_flag(iter
));
965 assert(!is_removed(iter_prev
));
966 assert(!is_removed(iter
));
967 assert(iter_prev
!= node
);
969 node
->next
= clear_flag(iter
);
971 node
->next
= flag_bucket(clear_flag(iter
));
973 new_node
= flag_bucket(node
);
976 if (uatomic_cmpxchg(&iter_prev
->next
, iter
,
978 continue; /* retry */
985 assert(!is_removed(iter
));
987 new_next
= flag_bucket(clear_flag(next
));
989 new_next
= clear_flag(next
);
990 (void) uatomic_cmpxchg(&iter_prev
->next
, iter
, new_next
);
995 unique_ret
->node
= return_node
;
996 /* unique_ret->next left unset, never used. */
1001 int _cds_lfht_del(struct cds_lfht
*ht
, unsigned long size
,
1002 struct cds_lfht_node
*node
,
1005 struct cds_lfht_node
*bucket
, *next
, *old
;
1007 if (!node
) /* Return -ENOENT if asked to delete NULL node */
1010 /* logically delete the node */
1011 assert(!is_bucket(node
));
1012 assert(!is_removed(node
));
1013 old
= rcu_dereference(node
->next
);
1015 struct cds_lfht_node
*new_next
;
1018 if (caa_unlikely(is_removed(next
)))
1021 assert(is_bucket(next
));
1023 assert(!is_bucket(next
));
1024 new_next
= flag_removed(next
);
1025 old
= uatomic_cmpxchg(&node
->next
, next
, new_next
);
1026 } while (old
!= next
);
1027 /* We performed the (logical) deletion. */
1030 * Ensure that the node is not visible to readers anymore: lookup for
1031 * the node, and remove it (along with any other logically removed node)
1034 bucket
= lookup_bucket(ht
, size
, bit_reverse_ulong(node
->reverse_hash
));
1035 _cds_lfht_gc_bucket(bucket
, node
);
1037 assert(is_removed(rcu_dereference(node
->next
)));
1042 void *partition_resize_thread(void *arg
)
1044 struct partition_resize_work
*work
= arg
;
1046 work
->ht
->cds_lfht_rcu_register_thread();
1047 work
->fct(work
->ht
, work
->i
, work
->start
, work
->len
);
1048 work
->ht
->cds_lfht_rcu_unregister_thread();
1053 void partition_resize_helper(struct cds_lfht
*ht
, unsigned long i
,
1055 void (*fct
)(struct cds_lfht
*ht
, unsigned long i
,
1056 unsigned long start
, unsigned long len
))
1058 unsigned long partition_len
;
1059 struct partition_resize_work
*work
;
1061 unsigned long nr_threads
;
1064 * Note: nr_cpus_mask + 1 is always power of 2.
1065 * We spawn just the number of threads we need to satisfy the minimum
1066 * partition size, up to the number of CPUs in the system.
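 *
 * Example: on an 8-CPU machine (nr_cpus_mask + 1 == 8), populating
 * len = 1048576 bucket nodes gives nr_threads = min(8, len >> 12) = 8 and
 * partition_len = len >> 3 = 131072, so every worker thread handles at
 * least MIN_PARTITION_PER_THREAD bucket nodes.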
1068 if (nr_cpus_mask
> 0) {
1069 nr_threads
= min(nr_cpus_mask
+ 1,
1070 len
>> MIN_PARTITION_PER_THREAD_ORDER
);
1074 partition_len
= len
>> get_count_order_ulong(nr_threads
);
1075 work
= calloc(nr_threads
, sizeof(*work
));
1077 for (thread
= 0; thread
< nr_threads
; thread
++) {
1078 work
[thread
].ht
= ht
;
1080 work
[thread
].len
= partition_len
;
1081 work
[thread
].start
= thread
* partition_len
;
1082 work
[thread
].fct
= fct
;
1083 ret
= pthread_create(&(work
[thread
].thread_id
), ht
->resize_attr
,
1084 partition_resize_thread
, &work
[thread
]);
1087 for (thread
= 0; thread
< nr_threads
; thread
++) {
1088 ret
= pthread_join(work
[thread
].thread_id
, NULL
);
/*
 * Holding RCU read lock to protect _cds_lfht_add against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * When we reach a certain length, we can split this population phase over
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore keep the expansion from lagging too far behind
 * concurrent insertion threads, by relying on the scheduler's ability to
 * schedule bucket node population fairly with insertions.
 */
1106 void init_table_populate_partition(struct cds_lfht
*ht
, unsigned long i
,
1107 unsigned long start
, unsigned long len
)
1111 assert(i
> ht
->min_alloc_order
);
1112 ht
->cds_lfht_rcu_read_lock();
1113 for (j
= start
; j
< start
+ len
; j
++) {
1114 struct cds_lfht_node
*new_node
= &ht
->t
.tbl
[i
]->nodes
[j
];
1116 dbg_printf("init populate: i %lu j %lu hash %lu\n",
1117 i
, j
, (1UL << (i
- 1)) + j
);
1118 new_node
->reverse_hash
=
1119 bit_reverse_ulong((1UL << (i
- 1)) + j
);
1120 _cds_lfht_add(ht
, NULL
, NULL
, 1UL << (i
- 1),
1123 ht
->cds_lfht_rcu_read_unlock();
1127 void init_table_populate(struct cds_lfht
*ht
, unsigned long i
,
1130 assert(nr_cpus_mask
!= -1);
1131 if (nr_cpus_mask
< 0 || len
< 2 * MIN_PARTITION_PER_THREAD
) {
1132 ht
->cds_lfht_rcu_thread_online();
1133 init_table_populate_partition(ht
, i
, 0, len
);
1134 ht
->cds_lfht_rcu_thread_offline();
1137 partition_resize_helper(ht
, i
, len
, init_table_populate_partition
);
1141 void init_table(struct cds_lfht
*ht
,
1142 unsigned long first_order
, unsigned long last_order
)
1146 dbg_printf("init table: first_order %lu last_order %lu\n",
1147 first_order
, last_order
);
1148 assert(first_order
> ht
->min_alloc_order
);
1149 for (i
= first_order
; i
<= last_order
; i
++) {
1152 len
= 1UL << (i
- 1);
1153 dbg_printf("init order %lu len: %lu\n", i
, len
);
1155 /* Stop expand if the resize target changes under us */
1156 if (CMM_LOAD_SHARED(ht
->t
.resize_target
) < (1UL << i
))
1159 ht
->t
.tbl
[i
] = calloc(1, len
* sizeof(struct cds_lfht_node
));
1160 assert(ht
->t
.tbl
[i
]);
1163 * Set all bucket nodes reverse hash values for a level and
1164 * link all bucket nodes into the table.
1166 init_table_populate(ht
, i
, len
);
1169 * Update table size.
1171 cmm_smp_wmb(); /* populate data before RCU size */
1172 CMM_STORE_SHARED(ht
->t
.size
, 1UL << i
);
1174 dbg_printf("init new size: %lu\n", 1UL << i
);
1175 if (CMM_LOAD_SHARED(ht
->in_progress_destroy
))
/*
 * Holding RCU read lock to protect _cds_lfht_remove against memory
 * reclaim that could be performed by other call_rcu worker threads (ABA
 * problem).
 *
 * For a single level, we logically remove and garbage collect each node.
 *
 * As a design choice, we perform logical removal and garbage collection on a
 * node-per-node basis to simplify this algorithm. We also assume keeping good
 * cache locality of the operation would outweigh the possible performance gain
 * that could be achieved by batching garbage collection for multiple levels.
 * However, this would have to be justified by benchmarks.
 *
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
 * invoked to free a whole level of bucket nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch or on a
 * node-per-node basis, as long as the guarantee above holds.
 *
 * When we reach a certain length, we can split this removal over many worker
 * threads, based on the number of CPUs available in the system. This should
 * keep the resize process from lagging behind too many concurrent updater
 * threads actively inserting into the hash table.
 */
1206 void remove_table_partition(struct cds_lfht
*ht
, unsigned long i
,
1207 unsigned long start
, unsigned long len
)
1211 assert(i
> ht
->min_alloc_order
);
1212 ht
->cds_lfht_rcu_read_lock();
1213 for (j
= start
; j
< start
+ len
; j
++) {
1214 struct cds_lfht_node
*fini_node
= &ht
->t
.tbl
[i
]->nodes
[j
];
1216 dbg_printf("remove entry: i %lu j %lu hash %lu\n",
1217 i
, j
, (1UL << (i
- 1)) + j
);
1218 fini_node
->reverse_hash
=
1219 bit_reverse_ulong((1UL << (i
- 1)) + j
);
1220 (void) _cds_lfht_del(ht
, 1UL << (i
- 1), fini_node
, 1);
1222 ht
->cds_lfht_rcu_read_unlock();
1226 void remove_table(struct cds_lfht
*ht
, unsigned long i
, unsigned long len
)
1229 assert(nr_cpus_mask
!= -1);
1230 if (nr_cpus_mask
< 0 || len
< 2 * MIN_PARTITION_PER_THREAD
) {
1231 ht
->cds_lfht_rcu_thread_online();
1232 remove_table_partition(ht
, i
, 0, len
);
1233 ht
->cds_lfht_rcu_thread_offline();
1236 partition_resize_helper(ht
, i
, len
, remove_table_partition
);
1240 void fini_table(struct cds_lfht
*ht
,
1241 unsigned long first_order
, unsigned long last_order
)
1244 void *free_by_rcu
= NULL
;
1246 dbg_printf("fini table: first_order %lu last_order %lu\n",
1247 first_order
, last_order
);
1248 assert(first_order
> ht
->min_alloc_order
);
1249 for (i
= last_order
; i
>= first_order
; i
--) {
1252 len
= 1UL << (i
- 1);
1253 dbg_printf("fini order %lu len: %lu\n", i
, len
);
1255 /* Stop shrink if the resize target changes under us */
1256 if (CMM_LOAD_SHARED(ht
->t
.resize_target
) > (1UL << (i
- 1)))
1259 cmm_smp_wmb(); /* populate data before RCU size */
1260 CMM_STORE_SHARED(ht
->t
.size
, 1UL << (i
- 1));
1263 * We need to wait for all add operations to reach Q.S. (and
1264 * thus use the new table for lookups) before we can start
1265 * releasing the old bucket nodes. Otherwise their lookup will
1266 * return a logically removed node as insert position.
1268 ht
->cds_lfht_synchronize_rcu();
                 * Set "removed" flag in bucket nodes about to be removed.
                 * Unlink all now-logically-removed bucket node pointers.
                 * Concurrent add/remove operations are helping us perform
                 * this garbage collection.
1278 remove_table(ht
, i
, len
);
1280 free_by_rcu
= ht
->t
.tbl
[i
];
1282 dbg_printf("fini new size: %lu\n", 1UL << i
);
1283 if (CMM_LOAD_SHARED(ht
->in_progress_destroy
))
1288 ht
->cds_lfht_synchronize_rcu();
1294 void cds_lfht_create_bucket(struct cds_lfht
*ht
, unsigned long size
)
1296 struct cds_lfht_node
*prev
, *node
;
1297 unsigned long order
, len
, i
, j
;
1299 ht
->t
.tbl
[0] = calloc(1, ht
->min_alloc_size
* sizeof(struct cds_lfht_node
));
1300 assert(ht
->t
.tbl
[0]);
1302 dbg_printf("create bucket: order %lu index %lu hash %lu\n", 0, 0, 0);
1303 ht
->t
.tbl
[0]->nodes
[0].next
= flag_bucket(get_end());
1304 ht
->t
.tbl
[0]->nodes
[0].reverse_hash
= 0;
1306 for (order
= 1; order
< get_count_order_ulong(size
) + 1; order
++) {
1307 len
= 1UL << (order
- 1);
1308 if (order
<= ht
->min_alloc_order
) {
1309 ht
->t
.tbl
[order
] = (struct rcu_level
*) (ht
->t
.tbl
[0]->nodes
+ len
);
1311 ht
->t
.tbl
[order
] = calloc(1, len
* sizeof(struct cds_lfht_node
));
1312 assert(ht
->t
.tbl
[order
]);
1316 prev
= ht
->t
.tbl
[i
]->nodes
;
1317 for (j
= 0; j
< len
; j
++) {
1318 if (j
& (j
- 1)) { /* Between power of 2 */
1320 } else if (j
) { /* At each power of 2 */
1322 prev
= ht
->t
.tbl
[i
]->nodes
;
1325 node
= &ht
->t
.tbl
[order
]->nodes
[j
];
1326 dbg_printf("create bucket: order %lu index %lu hash %lu\n",
1328 node
->next
= prev
->next
;
1329 assert(is_bucket(node
->next
));
1330 node
->reverse_hash
= bit_reverse_ulong(j
+ len
);
1331 prev
->next
= flag_bucket(node
);
1336 struct cds_lfht
*_cds_lfht_new(unsigned long init_size
,
1337 unsigned long min_alloc_size
,
1339 void (*cds_lfht_call_rcu
)(struct rcu_head
*head
,
1340 void (*func
)(struct rcu_head
*head
)),
1341 void (*cds_lfht_synchronize_rcu
)(void),
1342 void (*cds_lfht_rcu_read_lock
)(void),
1343 void (*cds_lfht_rcu_read_unlock
)(void),
1344 void (*cds_lfht_rcu_thread_offline
)(void),
1345 void (*cds_lfht_rcu_thread_online
)(void),
1346 void (*cds_lfht_rcu_register_thread
)(void),
1347 void (*cds_lfht_rcu_unregister_thread
)(void),
1348 pthread_attr_t
*attr
)
1350 struct cds_lfht
*ht
;
1351 unsigned long order
;
1353 /* min_alloc_size must be power of two */
1354 if (!min_alloc_size
|| (min_alloc_size
& (min_alloc_size
- 1)))
1356 /* init_size must be power of two */
1357 if (!init_size
|| (init_size
& (init_size
- 1)))
1359 min_alloc_size
= max(min_alloc_size
, MIN_TABLE_SIZE
);
1360 init_size
= max(init_size
, min_alloc_size
);
1361 ht
= calloc(1, sizeof(struct cds_lfht
));
1364 ht
->cds_lfht_call_rcu
= cds_lfht_call_rcu
;
1365 ht
->cds_lfht_synchronize_rcu
= cds_lfht_synchronize_rcu
;
1366 ht
->cds_lfht_rcu_read_lock
= cds_lfht_rcu_read_lock
;
1367 ht
->cds_lfht_rcu_read_unlock
= cds_lfht_rcu_read_unlock
;
1368 ht
->cds_lfht_rcu_thread_offline
= cds_lfht_rcu_thread_offline
;
1369 ht
->cds_lfht_rcu_thread_online
= cds_lfht_rcu_thread_online
;
1370 ht
->cds_lfht_rcu_register_thread
= cds_lfht_rcu_register_thread
;
1371 ht
->cds_lfht_rcu_unregister_thread
= cds_lfht_rcu_unregister_thread
;
1372 ht
->resize_attr
= attr
;
1373 alloc_split_items_count(ht
);
1374 /* this mutex should not nest in read-side C.S. */
1375 pthread_mutex_init(&ht
->resize_mutex
, NULL
);
1376 order
= get_count_order_ulong(init_size
);
1377 ht
->t
.resize_target
= 1UL << order
;
1378 ht
->min_alloc_size
= min_alloc_size
;
1379 ht
->min_alloc_order
= get_count_order_ulong(min_alloc_size
);
1380 cds_lfht_create_bucket(ht
, 1UL << order
);
1381 ht
->t
.size
= 1UL << order
;
1385 void cds_lfht_lookup(struct cds_lfht
*ht
, unsigned long hash
,
1386 cds_lfht_match_fct match
, const void *key
,
1387 struct cds_lfht_iter
*iter
)
1389 struct cds_lfht_node
*node
, *next
, *bucket
;
1390 unsigned long reverse_hash
, size
;
1392 reverse_hash
= bit_reverse_ulong(hash
);
1394 size
= rcu_dereference(ht
->t
.size
);
1395 bucket
= lookup_bucket(ht
, size
, hash
);
1396 /* We can always skip the bucket node initially */
1397 node
= rcu_dereference(bucket
->next
);
1398 node
= clear_flag(node
);
1400 if (caa_unlikely(is_end(node
))) {
1404 if (caa_unlikely(node
->reverse_hash
> reverse_hash
)) {
1408 next
= rcu_dereference(node
->next
);
1409 assert(node
== clear_flag(node
));
1410 if (caa_likely(!is_removed(next
))
1412 && node
->reverse_hash
== reverse_hash
1413 && caa_likely(match(node
, key
))) {
1416 node
= clear_flag(next
);
1418 assert(!node
|| !is_bucket(rcu_dereference(node
->next
)));
1423 void cds_lfht_next_duplicate(struct cds_lfht
*ht
, cds_lfht_match_fct match
,
1424 const void *key
, struct cds_lfht_iter
*iter
)
1426 struct cds_lfht_node
*node
, *next
;
1427 unsigned long reverse_hash
;
1430 reverse_hash
= node
->reverse_hash
;
1432 node
= clear_flag(next
);
1435 if (caa_unlikely(is_end(node
))) {
1439 if (caa_unlikely(node
->reverse_hash
> reverse_hash
)) {
1443 next
= rcu_dereference(node
->next
);
1444 if (caa_likely(!is_removed(next
))
1446 && caa_likely(match(node
, key
))) {
1449 node
= clear_flag(next
);
1451 assert(!node
|| !is_bucket(rcu_dereference(node
->next
)));
1456 void cds_lfht_next(struct cds_lfht
*ht
, struct cds_lfht_iter
*iter
)
1458 struct cds_lfht_node
*node
, *next
;
1460 node
= clear_flag(iter
->next
);
1462 if (caa_unlikely(is_end(node
))) {
1466 next
= rcu_dereference(node
->next
);
1467 if (caa_likely(!is_removed(next
))
1468 && !is_bucket(next
)) {
1471 node
= clear_flag(next
);
1473 assert(!node
|| !is_bucket(rcu_dereference(node
->next
)));
1478 void cds_lfht_first(struct cds_lfht
*ht
, struct cds_lfht_iter
*iter
)
1480 struct cds_lfht_node
*lookup
;
1483 * Get next after first bucket node. The first bucket node is the
1484 * first node of the linked list.
1486 lookup
= &ht
->t
.tbl
[0]->nodes
[0];
1487 iter
->next
= lookup
->next
;
1488 cds_lfht_next(ht
, iter
);
1491 void cds_lfht_add(struct cds_lfht
*ht
, unsigned long hash
,
1492 struct cds_lfht_node
*node
)
1496 node
->reverse_hash
= bit_reverse_ulong((unsigned long) hash
);
1497 size
= rcu_dereference(ht
->t
.size
);
1498 _cds_lfht_add(ht
, NULL
, NULL
, size
, node
, NULL
, 0);
1499 ht_count_add(ht
, size
, hash
);
1502 struct cds_lfht_node
*cds_lfht_add_unique(struct cds_lfht
*ht
,
1504 cds_lfht_match_fct match
,
1506 struct cds_lfht_node
*node
)
1509 struct cds_lfht_iter iter
;
1511 node
->reverse_hash
= bit_reverse_ulong((unsigned long) hash
);
1512 size
= rcu_dereference(ht
->t
.size
);
1513 _cds_lfht_add(ht
, match
, key
, size
, node
, &iter
, 0);
1514 if (iter
.node
== node
)
1515 ht_count_add(ht
, size
, hash
);
1519 struct cds_lfht_node
*cds_lfht_add_replace(struct cds_lfht
*ht
,
1521 cds_lfht_match_fct match
,
1523 struct cds_lfht_node
*node
)
1526 struct cds_lfht_iter iter
;
1528 node
->reverse_hash
= bit_reverse_ulong((unsigned long) hash
);
1529 size
= rcu_dereference(ht
->t
.size
);
1531 _cds_lfht_add(ht
, match
, key
, size
, node
, &iter
, 0);
1532 if (iter
.node
== node
) {
1533 ht_count_add(ht
, size
, hash
);
1537 if (!_cds_lfht_replace(ht
, size
, iter
.node
, iter
.next
, node
))
1542 int cds_lfht_replace(struct cds_lfht
*ht
, struct cds_lfht_iter
*old_iter
,
1543 struct cds_lfht_node
*new_node
)
1547 size
= rcu_dereference(ht
->t
.size
);
1548 return _cds_lfht_replace(ht
, size
, old_iter
->node
, old_iter
->next
,
1552 int cds_lfht_del(struct cds_lfht
*ht
, struct cds_lfht_iter
*iter
)
1554 unsigned long size
, hash
;
1557 size
= rcu_dereference(ht
->t
.size
);
1558 ret
= _cds_lfht_del(ht
, size
, iter
->node
, 0);
1560 hash
= bit_reverse_ulong(iter
->node
->reverse_hash
);
1561 ht_count_del(ht
, size
, hash
);
1567 int cds_lfht_delete_bucket(struct cds_lfht
*ht
)
1569 struct cds_lfht_node
*node
;
1570 unsigned long order
, i
, size
;
1572 /* Check that the table is empty */
1573 node
= &ht
->t
.tbl
[0]->nodes
[0];
1575 node
= clear_flag(node
)->next
;
1576 if (!is_bucket(node
))
1578 assert(!is_removed(node
));
1579 } while (!is_end(node
));
         * size accessed without rcu_dereference because hash table is
         * being destroyed.
1585 /* Internal sanity check: all nodes left should be bucket */
1586 for (order
= 0; order
< get_count_order_ulong(size
) + 1; order
++) {
1589 len
= !order
? 1 : 1UL << (order
- 1);
1590 for (i
= 0; i
< len
; i
++) {
1591 dbg_printf("delete order %lu i %lu hash %lu\n",
1593 bit_reverse_ulong(ht
->t
.tbl
[order
]->nodes
[i
].reverse_hash
));
1594 assert(is_bucket(ht
->t
.tbl
[order
]->nodes
[i
].next
));
1597 if (order
== ht
->min_alloc_order
)
1598 poison_free(ht
->t
.tbl
[0]);
1599 else if (order
> ht
->min_alloc_order
)
1600 poison_free(ht
->t
.tbl
[order
]);
1601 /* Nothing to delete for order < ht->min_alloc_order */
1607 * Should only be called when no more concurrent readers nor writers can
1608 * possibly access the table.
1610 int cds_lfht_destroy(struct cds_lfht
*ht
, pthread_attr_t
**attr
)
1614 /* Wait for in-flight resize operations to complete */
1615 _CMM_STORE_SHARED(ht
->in_progress_destroy
, 1);
1616 cmm_smp_mb(); /* Store destroy before load resize */
1617 while (uatomic_read(&ht
->in_progress_resize
))
1618 poll(NULL
, 0, 100); /* wait for 100ms */
1619 ret
= cds_lfht_delete_bucket(ht
);
1622 free_split_items_count(ht
);
1624 *attr
= ht
->resize_attr
;
1629 void cds_lfht_count_nodes(struct cds_lfht
*ht
,
1630 long *approx_before
,
1631 unsigned long *count
,
1632 unsigned long *removed
,
1635 struct cds_lfht_node
*node
, *next
;
1636 unsigned long nr_bucket
= 0;
1639 if (ht
->split_count
) {
1642 for (i
= 0; i
< split_count_mask
+ 1; i
++) {
1643 *approx_before
+= uatomic_read(&ht
->split_count
[i
].add
);
1644 *approx_before
-= uatomic_read(&ht
->split_count
[i
].del
);
1651 /* Count non-bucket nodes in the table */
1652 node
= &ht
->t
.tbl
[0]->nodes
[0];
1654 next
= rcu_dereference(node
->next
);
1655 if (is_removed(next
)) {
1656 if (!is_bucket(next
))
1660 } else if (!is_bucket(next
))
1664 node
= clear_flag(next
);
1665 } while (!is_end(node
));
1666 dbg_printf("number of bucket nodes: %lu\n", nr_bucket
);
1668 if (ht
->split_count
) {
1671 for (i
= 0; i
< split_count_mask
+ 1; i
++) {
1672 *approx_after
+= uatomic_read(&ht
->split_count
[i
].add
);
1673 *approx_after
-= uatomic_read(&ht
->split_count
[i
].del
);
1678 /* called with resize mutex held */
1680 void _do_cds_lfht_grow(struct cds_lfht
*ht
,
1681 unsigned long old_size
, unsigned long new_size
)
1683 unsigned long old_order
, new_order
;
1685 old_order
= get_count_order_ulong(old_size
);
1686 new_order
= get_count_order_ulong(new_size
);
1687 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1688 old_size
, old_order
, new_size
, new_order
);
1689 assert(new_size
> old_size
);
1690 init_table(ht
, old_order
+ 1, new_order
);
1693 /* called with resize mutex held */
1695 void _do_cds_lfht_shrink(struct cds_lfht
*ht
,
1696 unsigned long old_size
, unsigned long new_size
)
1698 unsigned long old_order
, new_order
;
1700 new_size
= max(new_size
, ht
->min_alloc_size
);
1701 old_order
= get_count_order_ulong(old_size
);
1702 new_order
= get_count_order_ulong(new_size
);
1703 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1704 old_size
, old_order
, new_size
, new_order
);
1705 assert(new_size
< old_size
);
1707 /* Remove and unlink all bucket nodes to remove. */
1708 fini_table(ht
, new_order
+ 1, old_order
);
1712 /* called with resize mutex held */
1714 void _do_cds_lfht_resize(struct cds_lfht
*ht
)
1716 unsigned long new_size
, old_size
;
1719 * Resize table, re-do if the target size has changed under us.
1722 assert(uatomic_read(&ht
->in_progress_resize
));
1723 if (CMM_LOAD_SHARED(ht
->in_progress_destroy
))
1725 ht
->t
.resize_initiated
= 1;
1726 old_size
= ht
->t
.size
;
1727 new_size
= CMM_LOAD_SHARED(ht
->t
.resize_target
);
1728 if (old_size
< new_size
)
1729 _do_cds_lfht_grow(ht
, old_size
, new_size
);
1730 else if (old_size
> new_size
)
1731 _do_cds_lfht_shrink(ht
, old_size
, new_size
);
1732 ht
->t
.resize_initiated
= 0;
1733 /* write resize_initiated before read resize_target */
1735 } while (ht
->t
.size
!= CMM_LOAD_SHARED(ht
->t
.resize_target
));
1739 unsigned long resize_target_grow(struct cds_lfht
*ht
, unsigned long new_size
)
1741 return _uatomic_xchg_monotonic_increase(&ht
->t
.resize_target
, new_size
);
1745 void resize_target_update_count(struct cds_lfht
*ht
,
1746 unsigned long count
)
1748 count
= max(count
, ht
->min_alloc_size
);
1749 uatomic_set(&ht
->t
.resize_target
, count
);
1752 void cds_lfht_resize(struct cds_lfht
*ht
, unsigned long new_size
)
1754 resize_target_update_count(ht
, new_size
);
1755 CMM_STORE_SHARED(ht
->t
.resize_initiated
, 1);
1756 ht
->cds_lfht_rcu_thread_offline();
1757 pthread_mutex_lock(&ht
->resize_mutex
);
1758 _do_cds_lfht_resize(ht
);
1759 pthread_mutex_unlock(&ht
->resize_mutex
);
1760 ht
->cds_lfht_rcu_thread_online();
1764 void do_resize_cb(struct rcu_head
*head
)
1766 struct rcu_resize_work
*work
=
1767 caa_container_of(head
, struct rcu_resize_work
, head
);
1768 struct cds_lfht
*ht
= work
->ht
;
1770 ht
->cds_lfht_rcu_thread_offline();
1771 pthread_mutex_lock(&ht
->resize_mutex
);
1772 _do_cds_lfht_resize(ht
);
1773 pthread_mutex_unlock(&ht
->resize_mutex
);
1774 ht
->cds_lfht_rcu_thread_online();
1776 cmm_smp_mb(); /* finish resize before decrement */
1777 uatomic_dec(&ht
->in_progress_resize
);
1781 void __cds_lfht_resize_lazy_launch(struct cds_lfht
*ht
)
1783 struct rcu_resize_work
*work
;
1785 /* Store resize_target before read resize_initiated */
1787 if (!CMM_LOAD_SHARED(ht
->t
.resize_initiated
)) {
1788 uatomic_inc(&ht
->in_progress_resize
);
1789 cmm_smp_mb(); /* increment resize count before load destroy */
1790 if (CMM_LOAD_SHARED(ht
->in_progress_destroy
)) {
1791 uatomic_dec(&ht
->in_progress_resize
);
1794 work
= malloc(sizeof(*work
));
1796 ht
->cds_lfht_call_rcu(&work
->head
, do_resize_cb
);
1797 CMM_STORE_SHARED(ht
->t
.resize_initiated
, 1);
1802 void cds_lfht_resize_lazy_grow(struct cds_lfht
*ht
, unsigned long size
, int growth
)
1804 unsigned long target_size
= size
<< growth
;
1806 if (resize_target_grow(ht
, target_size
) >= target_size
)
1809 __cds_lfht_resize_lazy_launch(ht
);
1813 * We favor grow operations over shrink. A shrink operation never occurs
1814 * if a grow operation is queued for lazy execution. A grow operation
1815 * cancels any pending shrink lazy execution.
1818 void cds_lfht_resize_lazy_count(struct cds_lfht
*ht
, unsigned long size
,
1819 unsigned long count
)
1821 if (!(ht
->flags
& CDS_LFHT_AUTO_RESIZE
))
1823 count
= max(count
, ht
->min_alloc_size
);
1825 return; /* Already the right size, no resize needed */
1826 if (count
> size
) { /* lazy grow */
1827 if (resize_target_grow(ht
, count
) >= count
)
1829 } else { /* lazy shrink */
1833 s
= uatomic_cmpxchg(&ht
->t
.resize_target
, size
, count
);
1835 break; /* no resize needed */
1837 return; /* growing is/(was just) in progress */
                        return; /* some other thread already did the shrink */
1843 __cds_lfht_resize_lazy_launch(ht
);