/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
 *
 * Based on the following articles:
 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
 *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
 * - Michael, M. M. High performance dynamic lock-free hash tables
 *   and list-based sets. In Proceedings of the fourteenth annual ACM
 *   symposium on Parallel algorithms and architectures, ACM Press,
 *   (2002), 73-82.
 *
 * Some specificities of this Lock-Free Resizable RCU Hash Table
 * implementation:
 *
 * - RCU read-side critical sections allow readers to perform hash
 *   table lookups, as well as traversals, and use the returned objects
 *   safely by allowing memory reclaim to take place only after a grace
 *   period.
 * - Add and remove operations are lock-free, and do not need to
 *   allocate memory. They need to be executed within an RCU read-side
 *   critical section to ensure the objects they read are valid and to
 *   deal with the cmpxchg ABA problem.
 * - add and add_unique operations are supported. add_unique checks if
 *   the node key already exists in the hash table. It ensures not to
 *   populate a duplicate key if the node key already exists in the hash
 *   table.
 * - The resize operation executes concurrently with
 *   add/add_unique/add_replace/remove/lookup/traversal.
 * - Hash table nodes are contained within a split-ordered list. This
 *   list is ordered by incrementing reversed-bits-hash value.
 * - An index of bucket nodes is kept. These bucket nodes are the hash
 *   table "buckets". These buckets are internal nodes that allow
 *   performing a fast hash lookup, similarly to a skip list. These
 *   buckets are chained together in the split-ordered list, which
 *   allows recursive expansion by inserting new buckets between the
 *   existing buckets. The split-ordered list allows adding new buckets
 *   between existing buckets as the table needs to grow.
 * - The resize operation for small tables only allows expanding the
 *   hash table. It is triggered automatically by detecting long chains
 *   in the add operation.
 * - The resize operation for larger tables (and available through an
 *   API) allows both expanding and shrinking the hash table.
 * - Split-counters are used to keep track of the number of
 *   nodes within the hash table for automatic resize triggering.
 * - A resize operation initiated by long chain detection is executed by
 *   a worker thread, which keeps the lock-freedom of add and remove.
 * - Resize operations are protected by a mutex.
 * - The removal operation is split in two parts: first, a "removed"
 *   flag is set in the next pointer within the node to remove. Then,
 *   a "garbage collection" is performed in the bucket containing the
 *   removed node (from the start of the bucket up to the removed node).
 *   All encountered nodes with the "removed" flag set in their next
 *   pointers are removed from the linked-list. If the cmpxchg used for
 *   removal fails (due to concurrent garbage collection or a concurrent
 *   add), we retry from the beginning of the bucket. This ensures that
 *   the node with the "removed" flag set is removed from the hash table
 *   (not visible to lookups anymore) before the RCU read-side critical
 *   section held across removal ends. Furthermore, this ensures that
 *   the node with the "removed" flag set is removed from the linked-list
 *   before its memory is reclaimed. After setting the "removed" flag,
 *   only the deleting thread that is the first to set the "removal
 *   owner" flag (with an xchg) into a node's next pointer is considered
 *   to have succeeded its removal (and thus owns the node to reclaim).
 *   Because we garbage-collect starting from an invariant node (the
 *   start-of-bucket bucket node) up to the "removed" node (or find a
 *   reverse-hash that is higher), we are sure that a successful
 *   traversal of the chain leads to a chain that is present in the
 *   linked-list (the start node is never removed) and that it does not
 *   contain the "removed" node anymore, even if concurrent delete/add
 *   operations are changing the structure of the list concurrently.
 *   A sketch of this two-phase removal follows this list.
 * - The add operations perform garbage collection of buckets if they
 *   encounter nodes with the removed flag set in the bucket where they
 *   want to add their new node. This ensures lock-freedom of the add
 *   operation by helping the remover unlink nodes from the list rather
 *   than waiting for it to do so.
 * - There are three memory backends for the hash table buckets: the
 *   "order table", the "chunks", and the "mmap".
 * - These bucket containers contain a compact version of the hash table
 *   nodes.
 * - The RCU "order table":
 *   - has a first level table indexed by log2(hash index) which is
 *     copied and expanded by the resize operation. This order table
 *     allows finding the "bucket node" tables.
 *   - There is one bucket node table per hash index order. The size of
 *     each bucket node table is half the number of hashes contained in
 *     this order (except for order 0).
 * - The RCU "chunks" is best suited for close interaction with a page
 *   allocator. It uses a linear array as index to "chunks" containing
 *   each the same number of buckets.
 * - The RCU "mmap" memory backend uses a single memory map to hold
 *   all buckets.
 * - synchronize_rcu is used to garbage-collect the old bucket node table.
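 *
 * As a conceptual sketch of the two-phase removal described above (the
 * actual implementation is in _lttng_ust_lfht_del() and
 * _lttng_ust_lfht_gc_bucket() below; error handling is omitted here):
 *
 *	// Phase 1: logical removal (lock-free, helped by concurrent adds).
 *	uatomic_or(&node->next, REMOVED_FLAG);
 *	// Phase 2: unlink the node by garbage-collecting its bucket chain.
 *	bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
 *	_lttng_ust_lfht_gc_bucket(bucket, node);
 *	// Only the thread whose xchg is first to set REMOVAL_OWNER_FLAG
 *	// owns the node and may hand it over to reclamation (after a
 *	// grace period).
 *	if (!is_removal_owner(uatomic_xchg(&node->next,
 *			flag_removal_owner(node->next))))
 *		return 0;	// this caller owns the removed node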
 *
 * Ordering Guarantees:
 *
 * To discuss these guarantees, we first define "read" operation as any
 * of the basic lttng_ust_lfht_lookup, lttng_ust_lfht_next_duplicate,
 * lttng_ust_lfht_first, lttng_ust_lfht_next operation, as well as
 * lttng_ust_lfht_add_unique (failure).
 *
 * We define "read traversal" operation as any of the following
 * group of operations:
 * - lttng_ust_lfht_lookup followed by iteration with lttng_ust_lfht_next_duplicate
 *   (and/or lttng_ust_lfht_next, although less common).
 * - lttng_ust_lfht_add_unique (failure) followed by iteration with
 *   lttng_ust_lfht_next_duplicate (and/or lttng_ust_lfht_next, although less
 *   common).
 * - lttng_ust_lfht_first followed by iteration with lttng_ust_lfht_next (and/or
 *   lttng_ust_lfht_next_duplicate, although less common).
 *
 * We define "write" operations as any of lttng_ust_lfht_add, lttng_ust_lfht_replace,
 * lttng_ust_lfht_add_unique (success), lttng_ust_lfht_add_replace, lttng_ust_lfht_del.
 *
 * When lttng_ust_lfht_add_unique succeeds (returns the node passed as
 * parameter), it acts as a "write" operation. When lttng_ust_lfht_add_unique
 * fails (returns a node different from the one passed as parameter), it
 * acts as a "read" operation. A lttng_ust_lfht_add_unique failure is a
 * lttng_ust_lfht_lookup "read" operation; therefore, any ordering guarantee
 * referring to "lookup" implies any of "lookup" or lttng_ust_lfht_add_unique
 * (failure).
 *
 * We define "prior" and "later" node as nodes observable by reads and
 * read traversals respectively before and after a write or sequence of
 * write operations.
 *
 * Hash-table operations are often cascaded, for example, the pointer
 * returned by a lttng_ust_lfht_lookup() might be passed to a lttng_ust_lfht_next(),
 * whose return value might in turn be passed to another hash-table
 * operation. This entire cascaded series of operations must be enclosed
 * by a pair of matching rcu_read_lock() and rcu_read_unlock()
 * operations.
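 *
 * For example, a cascaded lookup and duplicate traversal may look like
 * the following sketch, where the hash value and the match callback are
 * application-provided, and rcu_read_lock()/rcu_read_unlock() stand for
 * the read-side marking of the RCU flavor in use:
 *
 *	struct lttng_ust_lfht_iter iter;
 *
 *	rcu_read_lock();
 *	lttng_ust_lfht_lookup(ht, hash, match, key, &iter);
 *	while (iter.node != NULL) {
 *		// use iter.node, then move to the next duplicate
 *		lttng_ust_lfht_next_duplicate(ht, match, key, &iter);
 *	}
 *	rcu_read_unlock();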
 *
 * The following ordering guarantees are offered by this hash table:
 *
 * A.1) "read" after "write": if there is ordering between a write and a
 *      later read, then the read is guaranteed to see the write or some
 *      later write.
 * A.2) "read traversal" after "write": given that there is dependency
 *      ordering between reads in a "read traversal", if there is
 *      ordering between a write and the first read of the traversal,
 *      then the "read traversal" is guaranteed to see the write or
 *      some later write.
 * B.1) "write" after "read": if there is ordering between a read and a
 *      later write, then the read will never see the write.
 * B.2) "write" after "read traversal": given that there is dependency
 *      ordering between reads in a "read traversal", if there is
 *      ordering between the last read of the traversal and a later
 *      write, then the "read traversal" will never see the write.
 * C)   "write" while "read traversal": if a write occurs during a "read
 *      traversal", the traversal may, or may not, see the write.
 * D.1) "write" after "write": if there is ordering between a write and
 *      a later write, then the later write is guaranteed to see the
 *      effects of the first write.
 * D.2) Concurrent "write" pairs: The system will assign an arbitrary
 *      order to any pair of concurrent conflicting writes.
 *      Non-conflicting writes (for example, to different keys) are
 *      unordered.
 * E)   If a grace period separates a "del" or "replace" operation
 *      and a subsequent operation, then that subsequent operation is
 *      guaranteed not to see the removed item. (A sketch of this
 *      pattern follows this list.)
 * F)   Uniqueness guarantee: given a hash table that does not contain
 *      duplicate items for a given key, there will only be one item in
 *      the hash table after an arbitrary sequence of add_unique and/or
 *      add_replace operations. Note, however, that a pair of
 *      concurrent read operations might well access two different items
 *      with that key.
 * G.1) If a pair of lookups for a given key are ordered (e.g. by a
 *      memory barrier), then the second lookup will return the same
 *      node as the previous lookup, or some later node.
 * G.2) A "read traversal" that starts after the end of a prior "read
 *      traversal" (ordered by memory barriers) is guaranteed to see the
 *      same nodes as the previous traversal, or some later nodes.
 * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
 *      example, if a pair of reads to the same key run concurrently
 *      with an insertion of that same key, the reads remain unordered
 *      regardless of their return values. In other words, you cannot
 *      rely on the values returned by the reads to deduce ordering.
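 *
 * As an illustration of guarantee E (sketch only; synchronize_rcu()
 * stands for the grace-period wait of the RCU flavor in use, and free()
 * for whatever reclamation the caller performs):
 *
 *	if (!lttng_ust_lfht_del(ht, node)) {
 *		synchronize_rcu();	// wait for a grace period
 *		free(node);		// no reader can still observe the node
 *	}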
 *
 * Progress guarantees:
 *
 * * Reads are wait-free. These operations always move forward in the
 *   hash table linked list, and this list has no loop.
 * * Writes are lock-free. Any retry loop performed by a write operation
 *   is triggered by progress made within another update operation.
 *
 * Bucket node tables:
 *
 * hash table	hash table	the last	all bucket node tables
 * order	size		bucket node	0   1   2   3   4   5   6(index)
 *				table size
 * 0		1		1		1
 * 1		2		1		1   1
 * 2		4		2		1   1   2
 * 3		8		4		1   1   2   4
 * 4		16		8		1   1   2   4   8
 * 5		32		16		1   1   2   4   8  16
 * 6		64		32		1   1   2   4   8  16  32
 *
 * When growing/shrinking, we only focus on the last bucket node table
 * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
 * grow hash table from order 5 to 6: init the index=6 bucket node table
 * shrink hash table from order 6 to 5: fini the index=6 bucket node table
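 *
 * For instance, growing from order 5 to order 6 allocates the index=6
 * bucket node table of size (1 << (6 - 1)) = 32, which holds the buckets
 * with indexes 32 to 63; shrinking from order 6 back to 5 frees that same
 * table.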
 *
 * A bit of ascii art explanation:
 *
 * The order index is off by one compared to the actual power of 2
 * because we use index 0 to deal with the 0 special case.
 *
 * This shows the nodes for a small table ordered by reversed bits:
 *
 *    bits   reverse
 * 0  000        000
 * 1  001        100
 * 2  010        010
 * 3  011        110
 * 4  100        001
 * 5  101        101
 * 6  110        011
 * 7  111        111
 *
 * This shows the nodes in order of non-reversed bits, linked by
 * reversed-bit order.
 *
 * order              bits       reverse
 * 0               0  000        000
 * 1               |  1  001        100             <-
 * 2               |  |  2  010        010    <-     |
 *                 |  |  |  3  011        110  | <-  |
 * 3               -> |  |  |  4  100        001  |  |
 *                    -> |  |     5  101        101  |
 *                       -> |         6  110        011
 *                          ->             7  111        111
 *
 * Note on port to lttng-ust: auto-resize and accounting features are
 * removed.
 */

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>

#include <lttng/urcu/pointer.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>

#include "rculfhash.h"
#include "rculfhash-internal.h"

/*
 * Split-counters lazily update the global counter every 1024
 * additions/removals. They automatically keep track of whether a resize
 * is required. We use the bucket length as an indicator of the need to
 * expand for small tables and on machines lacking per-cpu data support.
 */
#define COUNT_COMMIT_ORDER		10

/*
 * Define the minimum table size.
 */
#define MIN_TABLE_ORDER			0
#define MIN_TABLE_SIZE			(1UL << MIN_TABLE_ORDER)

/*
 * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
 */
#define MIN_PARTITION_PER_THREAD_ORDER	12
#define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)

/*
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
 * The bucket flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 * The "removal owner" flag is used to detect which of the "del"
 * operations that set the "removed" flag gets to return the removed
 * node to its caller. Note that the replace operation does not need to
 * interact with the "removal owner" flag, because it validates that
 * the "removed" flag is not set before performing its cmpxchg.
 */
#define REMOVED_FLAG		(1UL << 0)
#define BUCKET_FLAG		(1UL << 1)
#define REMOVAL_OWNER_FLAG	(1UL << 2)
#define FLAGS_MASK		((1UL << 3) - 1)
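
/*
 * Illustration of the low-bit flag encoding (not part of the algorithm
 * itself): hash table nodes are aligned on at least 8 bytes, so the three
 * low bits of a next pointer are zero and can carry the flags above.
 * For a node located at address 0x1000:
 *
 *	flag_bucket(node)                   -> 0x1002	(BUCKET_FLAG)
 *	flag_removed_or_removal_owner(node) -> 0x1005	(REMOVED | REMOVAL_OWNER)
 *	clear_flag(0x1005)                  -> 0x1000
 */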

/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE	NULL

/*
 * ht_items_count: Split-counters counting the number of node additions
 * and removals in the table. Only used if the LTTNG_UST_LFHT_ACCOUNTING flag
 * is set at hash table creation.
 *
 * These are free-running counters, never reset to zero. They count the
 * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
 * operations to update the global counter. We choose a power-of-2 value
 * for the trigger to deal with 32- or 64-bit overflow of the counter.
 */
struct ht_items_count {
	unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG

static
void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
{
	iter->lfht = ht;
}

#define lttng_ust_lfht_iter_debug_assert(...)	assert(__VA_ARGS__)

#else

static
void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht __attribute__((unused)),
		struct lttng_ust_lfht_iter *iter __attribute__((unused)))
{
}

#define lttng_ust_lfht_iter_debug_assert(...)

#endif

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}
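
/*
 * For example: bit_reverse_u8(0x01) == 0x80, and bit_reverse_u8(0xb4) ==
 * 0x2d, i.e. 1011 0100 becomes 0010 1101.
 */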

#if (CAA_BITS_PER_LONG == 32)
static
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}
#else
static
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8)  << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}
#endif

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

/*
 * fls: returns the position of the most significant bit.
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
#if defined(__i386) || defined(__x86_64)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	__asm__ ("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif

#if defined(__x86_64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	__asm__ ("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

#ifndef HAS_FLS_U64
static __attribute__((unused))
unsigned int fls_u64(uint64_t x)
{
	unsigned int r = 64;

	if (!x)
		return 0;

	if (!(x & 0xFFFFFFFF00000000ULL)) {
		x <<= 32;
		r -= 32;
	}
	if (!(x & 0xFFFF000000000000ULL)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF00000000000000ULL)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF000000000000000ULL)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC000000000000000ULL)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x8000000000000000ULL)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

#ifndef HAS_FLS_U32
static __attribute__((unused))
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;

	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
#endif

unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
	return fls_u32(x);
#else
	return fls_u64(x);
#endif
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int lttng_ust_lfht_get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;

	return fls_u32(x - 1);
}

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
{
	if (!x)
		return -1;

	return lttng_ust_lfht_fls_ulong(x - 1);
}
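
/*
 * For example: lttng_ust_lfht_get_count_order_u32(1) == 0,
 * lttng_ust_lfht_get_count_order_u32(4) == 2, and
 * lttng_ust_lfht_get_count_order_u32(5) == 3, since 5 <= (1UL << 3).
 */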

static
struct lttng_ust_lfht_node *clear_flag(struct lttng_ust_lfht_node *node)
{
	return (struct lttng_ust_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(const struct lttng_ust_lfht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
int is_bucket(struct lttng_ust_lfht_node *node)
{
	return ((unsigned long) node) & BUCKET_FLAG;
}

static
struct lttng_ust_lfht_node *flag_bucket(struct lttng_ust_lfht_node *node)
{
	return (struct lttng_ust_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
}

static
int is_removal_owner(struct lttng_ust_lfht_node *node)
{
	return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
}

static
struct lttng_ust_lfht_node *flag_removal_owner(struct lttng_ust_lfht_node *node)
{
	return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
}

static
struct lttng_ust_lfht_node *flag_removed_or_removal_owner(struct lttng_ust_lfht_node *node)
{
	return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
}

static
struct lttng_ust_lfht_node *get_end(void)
{
	return (struct lttng_ust_lfht_node *) END_VALUE;
}

static
int is_end(struct lttng_ust_lfht_node *node)
{
	return clear_flag(node) == (struct lttng_ust_lfht_node *) END_VALUE;
}

static
void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
{
	return ht->mm->alloc_bucket_table(ht, order);
}

/*
 * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
 * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
 * lfht is destroyed.
 */
static
void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
{
	return ht->mm->free_bucket_table(ht, order);
}

static inline
struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
{
	return ht->bucket_at(ht, index);
}

static inline
struct lttng_ust_lfht_node *lookup_bucket(struct lttng_ust_lfht *ht, unsigned long size,
		unsigned long hash)
{
	return bucket_at(ht, hash & (size - 1));
}

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _lttng_ust_lfht_gc_bucket(struct lttng_ust_lfht_node *bucket, struct lttng_ust_lfht_node *node)
{
	struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_next;

	assert(!is_bucket(bucket));
	assert(!is_removed(bucket));
	assert(!is_removal_owner(bucket));
	assert(!is_bucket(node));
	assert(!is_removed(node));
	assert(!is_removal_owner(node));
	for (;;) {
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = lttng_ust_rcu_dereference(iter_prev->next);
		assert(!is_removed(iter));
		assert(!is_removal_owner(iter));
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		/*
		 * We should never be called with bucket (start of chain)
		 * and logically removed node (end of path compression
		 * marker) being the actual same node. This would be a
		 * bug in the algorithm implementation.
		 */
		assert(bucket != node);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				return;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				return;
			next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
			if (caa_likely(is_removed(next)))
				break;
			iter_prev = clear_flag(iter);
			iter = next;
		}
		assert(!is_removed(iter));
		assert(!is_removal_owner(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
	}
}

static
int _lttng_ust_lfht_replace(struct lttng_ust_lfht *ht, unsigned long size,
		struct lttng_ust_lfht_node *old_node,
		struct lttng_ust_lfht_node *old_next,
		struct lttng_ust_lfht_node *new_node)
{
	struct lttng_ust_lfht_node *bucket, *ret_next;

	if (!old_node)	/* Return -ENOENT if asked to replace NULL node */
		return -ENOENT;

	assert(!is_removed(old_node));
	assert(!is_removal_owner(old_node));
	assert(!is_bucket(old_node));
	assert(!is_removed(new_node));
	assert(!is_removal_owner(new_node));
	assert(!is_bucket(new_node));
	assert(new_node != old_node);
	for (;;) {
		/* Insert after node to be replaced */
		if (is_removed(old_next)) {
			/*
			 * Too late, the old node has been removed under us
			 * between lookup and replace. Fail.
			 */
			return -ENOENT;
		}
		assert(old_next == clear_flag(old_next));
		assert(new_node != old_next);
		/*
		 * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
		 * flag. It is either set atomically at the same time
		 * (replace) or after (del).
		 */
		assert(!is_removal_owner(old_next));
		new_node->next = old_next;
		/*
		 * Here is the whole trick for lock-free replace: we add
		 * the replacement node _after_ the node we want to
		 * replace by atomically setting its next pointer at the
		 * same time we set its removal flag. Given that
		 * the lookups/get next use an iterator aware of the
		 * next pointer, they will either skip the old node due
		 * to the removal flag and see the new node, or use
		 * the old node, but will not see the new one.
		 * This is a replacement of a node with another node
		 * that has the same value: we are therefore not
		 * removing a value from the hash table. We set both the
		 * REMOVED and REMOVAL_OWNER flags atomically so we own
		 * the node after successful cmpxchg.
		 */
		ret_next = uatomic_cmpxchg(&old_node->next,
			old_next, flag_removed_or_removal_owner(new_node));
		if (ret_next == old_next)
			break;		/* We performed the replacement. */
		old_next = ret_next;
	}

	/*
	 * Ensure that the old node is not visible to readers anymore:
	 * lookup for the node, and remove it (along with any other
	 * logically removed node) if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
	_lttng_ust_lfht_gc_bucket(bucket, new_node);

	assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
	return 0;
}

/*
 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
 * mode. A NULL unique_ret allows creation of duplicate keys.
 */
static
void _lttng_ust_lfht_add(struct lttng_ust_lfht *ht,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		unsigned long size,
		struct lttng_ust_lfht_node *node,
		struct lttng_ust_lfht_iter *unique_ret,
		int bucket_flag)
{
	struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
			*return_node;
	struct lttng_ust_lfht_node *bucket;

	assert(!is_bucket(node));
	assert(!is_removed(node));
	assert(!is_removal_owner(node));
	bucket = lookup_bucket(ht, size, hash);
	for (;;) {
		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = bucket;
		/* We can always skip the bucket node initially */
		iter = lttng_ust_rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (caa_unlikely(is_end(iter)))
				goto insert;
			if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
				goto insert;

			/* bucket node is the first node of the identical-hash-value chain */
			if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
				goto insert;

			next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
			if (caa_unlikely(is_removed(next)))
				goto gc_node;

			/* uniquely add */
			if (unique_ret
			    && !is_bucket(next)
			    && clear_flag(iter)->reverse_hash == node->reverse_hash) {
				struct lttng_ust_lfht_iter d_iter = {
					.node = node,
					.next = iter,
#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
					.lfht = ht,
#endif
				};

				/*
				 * uniquely adding inserts the node as the first
				 * node of the identical-hash-value node chain.
				 *
				 * This semantic ensures no duplicated keys
				 * should ever be observable in the table
				 * (including traversing the table node by
				 * node by forward iterations)
				 */
				lttng_ust_lfht_next_duplicate(ht, match, key, &d_iter);
				if (!d_iter.node)
					goto insert;

				*unique_ret = d_iter;
				return;
			}

			iter_prev = clear_flag(iter);
			iter = next;
		}

	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(!is_removal_owner(iter_prev));
		assert(!is_removed(iter));
		assert(!is_removal_owner(iter));
		assert(iter_prev != node);
		if (!bucket_flag)
			node->next = clear_flag(iter);
		else
			node->next = flag_bucket(clear_flag(iter));
		if (is_bucket(iter))
			new_node = flag_bucket(node);
		else
			new_node = node;
		if (uatomic_cmpxchg(&iter_prev->next, iter,
				    new_node) != iter) {
			continue;	/* retry */
		} else {
			return_node = node;
			goto end;
		}

	gc_node:
		assert(!is_removed(iter));
		assert(!is_removal_owner(iter));
		if (is_bucket(iter))
			new_next = flag_bucket(clear_flag(next));
		else
			new_next = clear_flag(next);
		(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
		/* retry */
	}
end:
	if (unique_ret) {
		unique_ret->node = return_node;
		/* unique_ret->next left unset, never used. */
	}
}

static
int _lttng_ust_lfht_del(struct lttng_ust_lfht *ht, unsigned long size,
		struct lttng_ust_lfht_node *node)
{
	struct lttng_ust_lfht_node *bucket, *next;

	if (!node)	/* Return -ENOENT if asked to delete NULL node */
		return -ENOENT;

	/* logically delete the node */
	assert(!is_bucket(node));
	assert(!is_removed(node));
	assert(!is_removal_owner(node));

	/*
	 * We are first checking if the node had previously been
	 * logically removed (this check is not atomic with setting the
	 * logical removal flag). Return -ENOENT if the node had
	 * previously been removed.
	 */
	next = CMM_LOAD_SHARED(node->next);	/* next is not dereferenced */
	if (caa_unlikely(is_removed(next)))
		return -ENOENT;
	assert(!is_bucket(next));
	/*
	 * The del operation semantic guarantees a full memory barrier
	 * before the uatomic_or atomic commit of the deletion flag.
	 */
	cmm_smp_mb__before_uatomic_or();
	/*
	 * We set the REMOVED_FLAG unconditionally. Note that there may
	 * be more than one concurrent thread setting this flag.
	 * Knowing which wins the race will be known after the garbage
	 * collection phase, stay tuned!
	 */
	uatomic_or(&node->next, REMOVED_FLAG);
	/* We performed the (logical) deletion. */

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
	_lttng_ust_lfht_gc_bucket(bucket, node);

	assert(is_removed(CMM_LOAD_SHARED(node->next)));
	/*
	 * Last phase: atomically exchange node->next with a version
	 * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
	 * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
	 * the node and win the removal race.
	 * It is interesting to note that all "add" paths are forbidden
	 * to change the next pointer starting from the point where the
	 * REMOVED_FLAG is set, so here using a read, followed by an
	 * xchg() suffices to guarantee that the xchg() will only ever
	 * set the "REMOVAL_OWNER_FLAG" (or change nothing if the flag
	 * was already set).
	 */
	if (!is_removal_owner(uatomic_xchg(&node->next,
			flag_removal_owner(node->next))))
		return 0;
	else
		return -ENOENT;
}

/*
 * Never called with size < 1.
 */
static
void lttng_ust_lfht_create_bucket(struct lttng_ust_lfht *ht, unsigned long size)
{
	struct lttng_ust_lfht_node *prev, *node;
	unsigned long order, len, i;
	int bucket_order;

	lttng_ust_lfht_alloc_bucket_table(ht, 0);

	dbg_printf("create bucket: order 0 index 0 hash 0\n");
	node = bucket_at(ht, 0);
	node->next = flag_bucket(get_end());
	node->reverse_hash = 0;

	bucket_order = lttng_ust_lfht_get_count_order_ulong(size);
	assert(bucket_order >= 0);

	for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
		len = 1UL << (order - 1);
		lttng_ust_lfht_alloc_bucket_table(ht, order);

		for (i = 0; i < len; i++) {
			/*
			 * Now, we are trying to init the node with the
			 * hash=(len+i) (which is also a bucket with the
			 * index=(len+i)) and insert it into the hash table,
			 * so this node has to be inserted after the bucket
			 * with the index=(len+i)&(len-1)=i. And because there
			 * is no other non-bucket node nor bucket node with
			 * a larger index/hash inserted yet, the bucket node
			 * being inserted should be inserted directly linked
			 * after the bucket node with index=i.
			 */
			prev = bucket_at(ht, i);
			node = bucket_at(ht, len + i);

			dbg_printf("create bucket: order %lu index %lu hash %lu\n",
				   order, len + i, len + i);
			node->reverse_hash = bit_reverse_ulong(len + i);

			/* insert after prev */
			assert(is_bucket(prev->next));
			node->next = prev->next;
			prev->next = flag_bucket(node);
		}
	}
}
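
/*
 * For example, with size = 8: order 1 links bucket 1 after bucket 0,
 * order 2 links buckets 2 and 3 after buckets 0 and 1, and order 3 links
 * buckets 4..7 after buckets 0..3, since the new bucket with index len+i
 * is inserted right after the bucket with index (len+i) & (len-1) = i.
 */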

#if (CAA_BITS_PER_LONG > 32)
/*
 * For 64-bit architectures, with max number of buckets small enough not to
 * use the entire 64-bit memory mapping space (and allowing a fair number of
 * hash table instances), use the mmap allocator, which is faster. Otherwise,
 * fall back to the order allocator.
 */
static
const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
{
	if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
		return &lttng_ust_lfht_mm_mmap;
	else
		return &lttng_ust_lfht_mm_order;
}
#else
/*
 * For 32-bit architectures, use the order allocator.
 */
static
const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
{
	return &lttng_ust_lfht_mm_order;
}
#endif

struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
			unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets,
			int flags,
			const struct lttng_ust_lfht_mm_type *mm)
{
	struct lttng_ust_lfht *ht;
	unsigned long order;

	/* min_nr_alloc_buckets must be power of two */
	if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
		return NULL;

	/* init_size must be power of two */
	if (!init_size || (init_size & (init_size - 1)))
		return NULL;

	/*
	 * Memory management plugin default.
	 */
	if (!mm)
		mm = get_mm_type(max_nr_buckets);

	/* max_nr_buckets == 0 for order based mm means infinite */
	if (mm == &lttng_ust_lfht_mm_order && !max_nr_buckets)
		max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);

	/* max_nr_buckets must be power of two */
	if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
		return NULL;

	if (flags & LTTNG_UST_LFHT_AUTO_RESIZE)
		return NULL;

	min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
	init_size = max(init_size, MIN_TABLE_SIZE);
	max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
	init_size = min(init_size, max_nr_buckets);

	ht = mm->alloc_lttng_ust_lfht(min_nr_alloc_buckets, max_nr_buckets);
	assert(ht);
	assert(ht->mm == mm);
	assert(ht->bucket_at == mm->bucket_at);

	ht->flags = flags;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	order = lttng_ust_lfht_get_count_order_ulong(init_size);
	ht->resize_target = 1UL << order;
	lttng_ust_lfht_create_bucket(ht, 1UL << order);
	ht->size = 1UL << order;
	return ht;
}
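
/*
 * Minimal creation sketch (illustration only; the memory management
 * backend, initial sizes and the later add/lookup calls are application
 * choices):
 *
 *	struct lttng_ust_lfht *ht;
 *
 *	ht = lttng_ust_lfht_new(MIN_TABLE_SIZE, MIN_TABLE_SIZE, 0, 0,
 *			&lttng_ust_lfht_mm_order);
 *	if (!ht)
 *		abort();
 */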

void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
		lttng_ust_lfht_match_fct match, const void *key,
		struct lttng_ust_lfht_iter *iter)
{
	struct lttng_ust_lfht_node *node, *next, *bucket;
	unsigned long reverse_hash, size;

	lttng_ust_lfht_iter_debug_set_ht(ht, iter);

	reverse_hash = bit_reverse_ulong(hash);

	size = lttng_ust_rcu_dereference(ht->size);
	bucket = lookup_bucket(ht, size, hash);
	/* We can always skip the bucket node initially */
	node = lttng_ust_rcu_dereference(bucket->next);
	node = clear_flag(node);
	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (caa_unlikely(node->reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = lttng_ust_rcu_dereference(node->next);
		assert(node == clear_flag(node));
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)
		    && node->reverse_hash == reverse_hash
		    && caa_likely(match(node, key))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}

void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht, lttng_ust_lfht_match_fct match,
		const void *key, struct lttng_ust_lfht_iter *iter)
{
	struct lttng_ust_lfht_node *node, *next;
	unsigned long reverse_hash;

	lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
	node = iter->node;
	reverse_hash = node->reverse_hash;
	next = iter->next;
	node = clear_flag(next);

	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		if (caa_unlikely(node->reverse_hash > reverse_hash)) {
			node = next = NULL;
			break;
		}
		next = lttng_ust_rcu_dereference(node->next);
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)
		    && caa_likely(match(node, key))) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}

void lttng_ust_lfht_next(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
{
	struct lttng_ust_lfht_node *node, *next;

	lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
	node = clear_flag(iter->next);
	for (;;) {
		if (caa_unlikely(is_end(node))) {
			node = next = NULL;
			break;
		}
		next = lttng_ust_rcu_dereference(node->next);
		if (caa_likely(!is_removed(next))
		    && !is_bucket(next)) {
			break;
		}
		node = clear_flag(next);
	}
	assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
	iter->node = node;
	iter->next = next;
}

void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
{
	lttng_ust_lfht_iter_debug_set_ht(ht, iter);
	/*
	 * Get next after first bucket node. The first bucket node is the
	 * first node of the linked list.
	 */
	iter->next = bucket_at(ht, 0)->next;
	lttng_ust_lfht_next(ht, iter);
}

void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
		struct lttng_ust_lfht_node *node)
{
	unsigned long size;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = lttng_ust_rcu_dereference(ht->size);
	_lttng_ust_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
}

struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *node)
{
	unsigned long size;
	struct lttng_ust_lfht_iter iter;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = lttng_ust_rcu_dereference(ht->size);
	_lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
	return iter.node;
}

struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *node)
{
	unsigned long size;
	struct lttng_ust_lfht_iter iter;

	node->reverse_hash = bit_reverse_ulong(hash);
	size = lttng_ust_rcu_dereference(ht->size);
	for (;;) {
		_lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
		if (iter.node == node) {
			return NULL;
		}

		if (!_lttng_ust_lfht_replace(ht, size, iter.node, iter.next, node))
			return iter.node;
	}
}

int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
		struct lttng_ust_lfht_iter *old_iter,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *new_node)
{
	unsigned long size;

	new_node->reverse_hash = bit_reverse_ulong(hash);
	if (!old_iter->node)
		return -ENOENT;
	if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
		return -EINVAL;
	if (caa_unlikely(!match(old_iter->node, key)))
		return -EINVAL;
	size = lttng_ust_rcu_dereference(ht->size);
	return _lttng_ust_lfht_replace(ht, size, old_iter->node, old_iter->next,
			new_node);
}

int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
{
	unsigned long size;

	size = lttng_ust_rcu_dereference(ht->size);
	return _lttng_ust_lfht_del(ht, size, node);
}

int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
{
	return is_removed(CMM_LOAD_SHARED(node->next));
}

int lttng_ust_lfht_delete_bucket(struct lttng_ust_lfht *ht)
{
	struct lttng_ust_lfht_node *node;
	unsigned long order, i, size;

	/* Check that the table is empty */
	node = bucket_at(ht, 0);
	do {
		node = clear_flag(node)->next;
		if (!is_bucket(node))
			return -EPERM;
		assert(!is_removed(node));
		assert(!is_removal_owner(node));
	} while (!is_end(node));
	/*
	 * size accessed without lttng_ust_rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->size;
	/* Internal sanity check: all nodes left should be buckets */
	for (i = 0; i < size; i++) {
		node = bucket_at(ht, i);
		dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
			i, i, bit_reverse_ulong(node->reverse_hash));
		assert(is_bucket(node->next));
	}

	for (order = lttng_ust_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
		lttng_ust_lfht_free_bucket_table(ht, order);

	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
{
	int ret;

	ret = lttng_ust_lfht_delete_bucket(ht);
	if (ret)
		return ret;
	ret = pthread_mutex_destroy(&ht->resize_mutex);