/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU	128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking because taking the bucket lock in both
 * tables during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */

static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
        return &tbl->locks[hash & tbl->locks_mask];
}

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
        return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
        return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
        u32 hash;

        if (unlikely(!ht->p.key_len))
                hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
        else
                hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
                                    ht->p.hash_rnd);

        return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
        return ht->p.hashfn(key, len, ht->p.hash_rnd) >> HASH_RESERVED_SPACE;
}

static u32 head_hashfn(const struct rhashtable *ht,
                       const struct bucket_table *tbl,
                       const struct rhash_head *he)
{
        return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

#ifdef CONFIG_PROVE_LOCKING
static void debug_dump_buckets(const struct rhashtable *ht,
                               const struct bucket_table *tbl)
{
        struct rhash_head *he;
        unsigned int i, hash;

        for (i = 0; i < tbl->size; i++) {
                pr_warn(" [Bucket %d] ", i);
                rht_for_each_rcu(he, tbl, i) {
                        hash = head_hashfn(ht, tbl, he);
                        pr_cont("[hash = %#x, lock = %p] ",
                                hash, bucket_lock(tbl, hash));
                }
                pr_cont("\n");
        }
}

static void debug_dump_table(struct rhashtable *ht,
                             const struct bucket_table *tbl,
                             unsigned int hash)
{
        struct bucket_table *old_tbl, *future_tbl;

        pr_emerg("BUG: lock for hash %#x in table %p not held\n",
                 hash, tbl);

        rcu_read_lock();
        future_tbl = rht_dereference_rcu(ht->future_tbl, ht);
        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        if (future_tbl != old_tbl) {
                pr_warn("Future table %p (size: %zd)\n",
                        future_tbl, future_tbl->size);
                debug_dump_buckets(ht, future_tbl);
        }

        pr_warn("Table %p (size: %zd)\n", old_tbl, old_tbl->size);
        debug_dump_buckets(ht, old_tbl);
        rcu_read_unlock();
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)				\
        do {								\
                if (unlikely(!lockdep_rht_bucket_is_held(TBL, HASH))) {	\
                        debug_dump_table(HT, TBL, HASH);		\
                        BUG();						\
                }							\
        } while (0)

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#define ASSERT_BUCKET_LOCK(HT, TBL, HASH)
#endif

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
        struct rhash_head __rcu **pprev;

        for (pprev = &tbl->buckets[n];
             !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
             pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
                ;

        return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);

        if (sizeof(spinlock_t) != 0) {
                if (size * sizeof(spinlock_t) > PAGE_SIZE)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
                else
                        tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
                                                   GFP_KERNEL);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tbl->locks[i]);
        }
        tbl->locks_mask = size - 1;

        return 0;
}
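
/* Worked example (not part of the original file): with 4 possible CPUs and
 * the default locks_mul of BUCKET_LOCKS_PER_CPU (128), the code above asks
 * for roundup_pow_of_two(4 * 128) = 512 bucket locks, which is then capped
 * at tbl->size / 2; a 64-bucket table therefore ends up with 32 locks and
 * locks_mask = 31.
 */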

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl)
                kvfree(tbl->locks);

        kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets)
{
        struct bucket_table *tbl;
        size_t size;
        u32 i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (tbl == NULL)
                tbl = vzalloc(size);
        if (tbl == NULL)
                return NULL;

        tbl->size = nbuckets;

        if (alloc_bucket_locks(ht, tbl) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

        return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
        /* Expand table when exceeding 75% load */
        return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
               (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
        /* Shrink table beneath 30% load */
        return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
               (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
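
/* Worked example (not part of the original file): for new_size = 64,
 * rht_grow_above_75() returns true once nelems exceeds 64 / 4 * 3 = 48
 * entries (subject to the max_shift check above), and rht_shrink_below_30()
 * returns true once nelems falls below 64 * 3 / 10 = 19 entries (subject to
 * the min_shift check). These two helpers are typically used as the
 * grow_decision and shrink_decision callbacks in struct rhashtable_params.
 */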

static void lock_buckets(struct bucket_table *new_tbl,
                         struct bucket_table *old_tbl, unsigned int hash)
        __acquires(old_bucket_lock)
{
        spin_lock_bh(bucket_lock(old_tbl, hash));
        if (new_tbl != old_tbl)
                spin_lock_bh_nested(bucket_lock(new_tbl, hash),
                                    SINGLE_DEPTH_NESTING);
}

static void unlock_buckets(struct bucket_table *new_tbl,
                           struct bucket_table *old_tbl, unsigned int hash)
        __releases(old_bucket_lock)
{
        if (new_tbl != old_tbl)
                spin_unlock_bh(bucket_lock(new_tbl, hash));
        spin_unlock_bh(bucket_lock(old_tbl, hash));
}

/*
 * Unlink entries from the old bucket which hash to a different bucket in
 * the new table.
 *
 * Returns true if more work remains to be performed on the bucket, i.e.
 * the old bucket is not yet empty.
 */
static bool hashtable_chain_unzip(struct rhashtable *ht,
                                  const struct bucket_table *new_tbl,
                                  struct bucket_table *old_tbl,
                                  unsigned int old_hash)
{
        struct rhash_head *he, *p, *next;
        unsigned int new_hash, new_hash2;

        ASSERT_BUCKET_LOCK(ht, old_tbl, old_hash);

        /* Old bucket empty, no work needed. */
        p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
                                   old_hash);
        if (rht_is_a_nulls(p))
                return false;

        new_hash = head_hashfn(ht, new_tbl, p);
        ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

        /* Advance the old bucket pointer one or more times until it
         * reaches a node that doesn't hash to the same bucket as the
         * previous node p; call that previous node p.
         */
        rht_for_each_continue(he, p->next, old_tbl, old_hash) {
                new_hash2 = head_hashfn(ht, new_tbl, he);
                ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash2);

                if (new_hash != new_hash2)
                        break;

                p = he;
        }
        rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

        /* Find the subsequent node which does hash to the same
         * bucket as node P, or NULL if no such node exists.
         */
        INIT_RHT_NULLS_HEAD(next, ht, old_hash);
        if (!rht_is_a_nulls(he)) {
                rht_for_each_continue(he, he->next, old_tbl, old_hash) {
                        if (head_hashfn(ht, new_tbl, he) == new_hash) {
                                next = he;
                                break;
                        }
                }
        }

        /* Set p's next pointer to that subsequent node pointer,
         * bypassing the nodes which do not hash to p's bucket.
         */
        rcu_assign_pointer(p->next, next);

        p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
                                   old_hash);

        return !rht_is_a_nulls(p);
}

static void link_old_to_new(struct rhashtable *ht, struct bucket_table *new_tbl,
                            unsigned int new_hash, struct rhash_head *entry)
{
        ASSERT_BUCKET_LOCK(ht, new_tbl, new_hash);

        rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
        struct rhash_head *he;
        unsigned int new_hash, old_hash;
        bool complete = false;

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
        if (new_tbl == NULL)
                return -ENOMEM;

        atomic_inc(&ht->shift);

        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         * The synchronize_rcu() guarantees that the new table has been picked
         * up, so no new additions go into the old table while we relink.
         */
        rcu_assign_pointer(ht->future_tbl, new_tbl);
        synchronize_rcu();

        /* For each new bucket, search the corresponding old bucket for the
         * first entry that hashes to the new bucket, and link the end of
         * newly formed bucket chain (containing entries added to future
         * table) to that entry. Since all the entries which will end up in
         * the new bucket appear in the same old bucket, this constructs an
         * entirely valid new hash table, but with multiple buckets
         * "zipped" together into a single imprecise chain.
         */
        for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
                old_hash = rht_bucket_index(old_tbl, new_hash);
                lock_buckets(new_tbl, old_tbl, new_hash);
                rht_for_each(he, old_tbl, old_hash) {
                        if (head_hashfn(ht, new_tbl, he) == new_hash) {
                                link_old_to_new(ht, new_tbl, new_hash, he);
                                break;
                        }
                }
                unlock_buckets(new_tbl, old_tbl, new_hash);
        }

        /* Unzip interleaved hash chains */
        while (!complete && !ht->being_destroyed) {
                /* Wait for readers. All new readers will see the new
                 * table, and thus no references to the old table will
                 * remain.
                 */
                synchronize_rcu();

                /* For each bucket in the old table (each of which
                 * contains items from multiple buckets of the new
                 * table), unzip the interleaved chain.
                 */
                complete = true;
                for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
                        lock_buckets(new_tbl, old_tbl, old_hash);

                        if (hashtable_chain_unzip(ht, new_tbl, old_tbl,
                                                  old_hash))
                                complete = false;

                        unlock_buckets(new_tbl, old_tbl, old_hash);
                }
        }

        rcu_assign_pointer(ht->tbl, new_tbl);
        synchronize_rcu();

        bucket_table_free(old_tbl);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
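
/* Illustrative sketch (not part of the original file) of the "zipped" state
 * after the relink phase in rhashtable_expand(), for an old table of two
 * buckets growing to four and assuming no concurrent insertions have landed
 * in the new buckets yet. Each letter is an entry, the digit after the
 * slash is the new bucket it hashes to:
 *
 *	old[0]: A/2 -> B/0 -> C/2 -> D/0 -> nulls
 *	new[2]: A/2 -> B/0 -> C/2 -> D/0 -> nulls   (chain entered at A)
 *	new[0]:        B/0 -> C/2 -> D/0 -> nulls   (chain entered at B)
 *
 * Both new buckets are valid but imprecise: each still contains entries
 * that belong to the other bucket. hashtable_chain_unzip() is then run over
 * the old buckets, grace period by grace period, until every entry is
 * reachable only from its own new bucket.
 */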

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex and that no other unprotected table mutations take place.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
        unsigned int new_hash;

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, tbl->size / 2);
        if (new_tbl == NULL)
                return -ENOMEM;

        rcu_assign_pointer(ht->future_tbl, new_tbl);
        synchronize_rcu();

        /* Link the first entry in the old bucket to the end of the
         * bucket in the new table. As entries are concurrently being
         * added to the new table, lock down the new bucket. As we
         * always divide the size in half when shrinking, each bucket
         * in the new table maps to exactly two buckets in the old
         * table.
         */
        for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
                lock_buckets(new_tbl, tbl, new_hash);

                rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
                                   tbl->buckets[new_hash]);
                ASSERT_BUCKET_LOCK(ht, tbl, new_hash + new_tbl->size);
                rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
                                   tbl->buckets[new_hash + new_tbl->size]);

                unlock_buckets(new_tbl, tbl, new_hash);
        }

        /* Publish the new, valid hash table */
        rcu_assign_pointer(ht->tbl, new_tbl);
        atomic_dec(&ht->shift);

        /* Wait for readers. No new readers will have references to the
         * old hash table.
         */
        synchronize_rcu();

        bucket_table_free(tbl);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;
        struct rhashtable_walker *walker;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);
        if (ht->being_destroyed)
                goto unlock;

        tbl = rht_dereference(ht->tbl, ht);

        list_for_each_entry(walker, &ht->walkers, list)
                walker->resize = true;

        if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
                rhashtable_expand(ht);
        else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
                rhashtable_shrink(ht);

unlock:
        mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
        struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
        struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
        size_t size = tbl->size;

        /* Only adjust the table if no resizing is currently in progress. */
        if (tbl == new_tbl &&
            ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
             (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
                schedule_work(&ht->run_work);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
                                struct bucket_table *tbl, u32 hash)
{
        struct rhash_head *head;

        hash = rht_bucket_index(tbl, hash);
        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        ASSERT_BUCKET_LOCK(ht, tbl, hash);

        if (rht_is_a_nulls(head))
                INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
        else
                RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);

        rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
        struct bucket_table *tbl, *old_tbl;
        u32 hash;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->future_tbl, ht);
        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

        lock_buckets(tbl, old_tbl, hash);
        __rhashtable_insert(ht, obj, tbl, hash);
        unlock_buckets(tbl, old_tbl, hash);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
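
/* Illustrative usage sketch (not part of the original file). The object
 * layout follows "Configuration Example 1" from the rhashtable_init()
 * documentation below; "my_table" and "my_obj" are hypothetical names.
 * rhashtable_insert() only takes the per-bucket lock, so this is safe
 * from atomic context:
 *
 *	struct test_obj {
 *		int			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static struct rhashtable my_table;
 *
 *	void add_obj(struct test_obj *my_obj)
 *	{
 *		rhashtable_insert(&my_table, &my_obj->node);
 *	}
 */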

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * The caller must ensure that no concurrent table mutations occur. It is
 * however valid to have concurrent lookups if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
        struct bucket_table *tbl, *new_tbl, *old_tbl;
        struct rhash_head __rcu **pprev;
        struct rhash_head *he, *he2;
        unsigned int hash, new_hash;
        bool ret = false;

        rcu_read_lock();
        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        tbl = new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
        new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

        lock_buckets(new_tbl, old_tbl, new_hash);
restart:
        hash = rht_bucket_index(tbl, new_hash);
        pprev = &tbl->buckets[hash];
        rht_for_each(he, tbl, hash) {
                if (he != obj) {
                        pprev = &he->next;
                        continue;
                }

                ASSERT_BUCKET_LOCK(ht, tbl, hash);

                if (old_tbl->size > new_tbl->size && tbl == old_tbl &&
                    !rht_is_a_nulls(obj->next) &&
                    head_hashfn(ht, tbl, obj->next) != hash) {
                        rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
                } else if (unlikely(old_tbl->size < new_tbl->size && tbl == new_tbl)) {
                        rht_for_each_continue(he2, obj->next, tbl, hash) {
                                if (head_hashfn(ht, tbl, he2) == hash) {
                                        rcu_assign_pointer(*pprev, he2);
                                        goto found;
                                }
                        }

                        rcu_assign_pointer(*pprev, (struct rhash_head *) rht_marker(ht, hash));
                } else {
                        rcu_assign_pointer(*pprev, obj->next);
                }

found:
                ret = true;
                break;
        }

        /* The entry may be linked in either 'tbl', 'future_tbl', or both.
         * 'future_tbl' only exists for a short period of time during
         * resizing. Thus traversing both is fine and the added cost is
         * permissible.
         */
        if (tbl != old_tbl) {
                tbl = old_tbl;
                goto restart;
        }

        unlock_buckets(new_tbl, old_tbl, new_hash);

        if (ret) {
                atomic_dec(&ht->nelems);
                rhashtable_wakeup_worker(ht);
        }

        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
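
/* Illustrative usage sketch (not part of the original file); "my_table" and
 * "my_obj" are hypothetical and "rcu" is an assumed struct rcu_head member
 * of struct test_obj. Because concurrent RCU-protected lookups may still
 * hold a reference, the object must not be freed before a grace period has
 * elapsed:
 *
 *	if (rhashtable_remove(&my_table, &my_obj->node))
 *		kfree_rcu(my_obj, rcu);
 */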

struct rhashtable_compare_arg {
        struct rhashtable *ht;
        const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
        struct rhashtable_compare_arg *x = arg;
        struct rhashtable *ht = x->ht;

        return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };

        BUG_ON(!ht->p.key_len);

        return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
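
/* Illustrative usage sketch (not part of the original file); "my_table" is
 * hypothetical and uses the fixed-length integer key from "Configuration
 * Example 1" below. The caller holds rcu_read_lock() so that the returned
 * object cannot be freed while it is being used:
 *
 *	struct test_obj *obj;
 *	int key = 42;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(&my_table, &key);
 *	if (obj)
 *		pr_info("found key %d\n", obj->key);
 *	rcu_read_unlock();
 */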

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
                                bool (*compare)(void *, void *), void *arg)
{
        const struct bucket_table *tbl, *old_tbl;
        struct rhash_head *he;
        u32 hash;

        rcu_read_lock();

        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        tbl = rht_dereference_rcu(ht->future_tbl, ht);
        hash = key_hashfn(ht, key, ht->p.key_len);
restart:
        rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
                if (!compare(rht_obj(ht, he), arg))
                        continue;
                rcu_read_unlock();
                return rht_obj(ht, he);
        }

        if (unlikely(tbl != old_tbl)) {
                tbl = old_tbl;
                goto restart;
        }
        rcu_read_unlock();

        return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
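
/* Illustrative sketch of a caller-supplied compare function (not part of the
 * original file); "struct my_obj", "my_match" and "my_table" are
 * hypothetical, and the table is assumed to be configured with a 4-byte key
 * so that key_hashfn() above hashes the u32 key correctly:
 *
 *	static bool my_match(void *ptr, void *arg)
 *	{
 *		const struct my_obj *obj = ptr;
 *		const u32 *key = arg;
 *
 *		return obj->id == *key;
 *	}
 *
 *	struct my_obj *obj;
 *	u32 key = 42;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_compare(&my_table, &key, my_match, &key);
 *	...
 *	rcu_read_unlock();
 */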

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = rht_obj(ht, obj) + ht->p.key_offset,
        };

        BUG_ON(!ht->p.key_len);

        return rhashtable_lookup_compare_insert(ht, obj, &rhashtable_compare,
                                                &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
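
/* Illustrative usage sketch (not part of the original file); "my_table" and
 * "my_obj" are hypothetical. Unlike rhashtable_insert(), the return value
 * indicates whether the object was inserted or rejected because an entry
 * with the same key already exists:
 *
 *	if (!rhashtable_lookup_insert(&my_table, &my_obj->node)) {
 *		kfree(my_obj);
 *		return -EEXIST;
 *	}
 */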

/**
 * rhashtable_lookup_compare_insert - search and insert object into hash
 *				      table with compare function
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
                                      struct rhash_head *obj,
                                      bool (*compare)(void *, void *),
                                      void *arg)
{
        struct bucket_table *new_tbl, *old_tbl;
        u32 new_hash;
        bool success = true;

        BUG_ON(!ht->p.key_len);

        rcu_read_lock();
        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
        new_hash = obj_raw_hashfn(ht, rht_obj(ht, obj));

        lock_buckets(new_tbl, old_tbl, new_hash);

        if (rhashtable_lookup_compare(ht, rht_obj(ht, obj) + ht->p.key_offset,
                                      compare, arg)) {
                success = false;
                goto exit;
        }

        __rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
        unlock_buckets(new_tbl, old_tbl, new_hash);
        rcu_read_unlock();

        return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare_insert);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;

        iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
        if (!iter->walker)
                return -ENOMEM;

        iter->walker->resize = false;

        mutex_lock(&ht->mutex);
        list_add(&iter->walker->list, &ht->walkers);
        mutex_unlock(&ht->mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        mutex_lock(&iter->ht->mutex);
        list_del(&iter->walker->list);
        mutex_unlock(&iter->ht->mutex);
        kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
{
        rcu_read_lock();

        if (iter->walker->resize) {
                iter->slot = 0;
                iter->skip = 0;
                iter->walker->resize = false;
                return -EAGAIN;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        const struct bucket_table *tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        void *obj = NULL;

        tbl = rht_dereference_rcu(ht->tbl, ht);

        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
                goto next;
        }

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        obj = rht_obj(ht, p);
                        goto out;
                }

                iter->skip = 0;
        }

        iter->p = NULL;

out:
        if (iter->walker->resize) {
                iter->p = NULL;
                iter->slot = 0;
                iter->skip = 0;
                iter->walker->resize = false;
                return ERR_PTR(-EAGAIN);
        }

        return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
{
        rcu_read_unlock();
        iter->p = NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
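
/* Illustrative walk sketch (not part of the original file); "my_table" is
 * hypothetical and the objects follow "Configuration Example 1" below. A
 * walk is bracketed by rhashtable_walk_start() and rhashtable_walk_stop();
 * both rhashtable_walk_start() and rhashtable_walk_next() may report
 * -EAGAIN after a resize, in which case the iterator has rewound and the
 * walk simply continues:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&my_table, &iter);
 *	if (err)
 *		return err;
 *
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;
 *			break;
 *		}
 *		pr_info("key %d\n", obj->key);
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */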

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((params->key_len && !params->hashfn) ||
            (!params->key_len && !params->obj_hashfn))
                return -EINVAL;

        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;

        params->min_shift = max_t(size_t, params->min_shift,
                                  ilog2(HASH_MIN_SIZE));

        if (params->nelem_hint)
                size = rounded_hashtable_size(params);

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        memcpy(&ht->p, params, sizeof(*params));
        INIT_LIST_HEAD(&ht->walkers);

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        tbl = bucket_table_alloc(ht, size);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);
        atomic_set(&ht->shift, ilog2(tbl->size));
        RCU_INIT_POINTER(ht->tbl, tbl);
        RCU_INIT_POINTER(ht->future_tbl, tbl);

        if (!ht->p.hash_rnd)
                get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

        if (ht->p.grow_decision || ht->p.shrink_decision)
                INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not rcu safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
        ht->being_destroyed = true;

        if (ht->p.grow_decision || ht->p.shrink_decision)
                cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        bucket_table_free(rht_dereference(ht->tbl, ht));
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);
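
/* Illustrative lifecycle sketch (not part of the original file), putting the
 * pieces above together; "my_table" and "my_obj" are hypothetical and the
 * parameters follow "Configuration Example 1" from rhashtable_init() with
 * automatic resizing enabled:
 *
 *	struct rhashtable_params params = {
 *		.head_offset	 = offsetof(struct test_obj, node),
 *		.key_offset	 = offsetof(struct test_obj, key),
 *		.key_len	 = sizeof(int),
 *		.hashfn		 = jhash,
 *		.nulls_base	 = (1U << RHT_BASE_SHIFT),
 *		.grow_decision	 = rht_grow_above_75,
 *		.shrink_decision = rht_shrink_below_30,
 *	};
 *	struct rhashtable my_table;
 *	int err;
 *
 *	err = rhashtable_init(&my_table, &params);
 *	if (err)
 *		return err;
 *
 *	rhashtable_insert(&my_table, &my_obj->node);
 *	...
 *	rhashtable_remove(&my_table, &my_obj->node);
 *	rhashtable_destroy(&my_table);
 */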