/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)
struct rhash_head {
	struct rhash_head __rcu		*next;
};
/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};
/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;
/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};
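/*
 * Usage sketch: a minimal keyed object and matching parameters. The names
 * "struct test_obj", "node" and "test_params" are hypothetical, not part
 * of this header; they illustrate the offsetof()-based setup the structure
 * above expects.
 *
 *	struct test_obj {
 *		int			value;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.head_offset = offsetof(struct test_obj, node),
 *		.key_offset  = offsetof(struct test_obj, value),
 *		.key_len     = sizeof(int),
 *		.automatic_shrinking = true,
 *	};
 */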
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};
/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};
/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}
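/*
 * Illustrative sketch: splitting a chain-terminating nulls pointer back
 * into the base and hash fields described at the top of this file
 * ("pos" is a hypothetical chain cursor):
 *
 *	if (rht_is_a_nulls(pos)) {
 *		unsigned long v = rht_get_nulls_value(pos);
 *		u32 base = v >> RHT_BASE_SHIFT;
 *		u32 hash = v & ((1UL << RHT_HASH_BITS) - 1);
 *	}
 */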
static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}
static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			/* jhash2 requires the key to be a u32 multiple */
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}
static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}
/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht:		hash table
 * @tbl:	current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size;
}
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking: holding the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}
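/*
 * Illustrative sketch of the ordering rule above ("old_tbl" and "new_tbl"
 * are hypothetical); the new-table lock nests under the old one:
 *
 *	spin_lock_bh(rht_bucket_lock(old_tbl, hash));
 *	spin_lock_nested(rht_bucket_lock(new_tbl, hash),
 *			 SINGLE_DEPTH_NESTING);
 */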
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht);
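/*
 * Usage sketch (reusing the hypothetical "test_params" above):
 *
 *	struct rhashtable ht;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (err)
 *		return err;
 */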
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
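/*
 * Walk sketch (hypothetical "struct test_obj" element type; error handling
 * abbreviated). rhashtable_walk_next() may return ERR_PTR(-EAGAIN) when a
 * resize interrupted the walk:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	rhashtable_walk_init(&ht, &iter);
 *	rhashtable_walk_start(&iter);
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;
 *		...
 *	}
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */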
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);
#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
				    tbl, hash, member)
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	    \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = next,						    \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)		\
	for (({barrier(); }),					\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);	\
	     !rht_is_a_nulls(pos);				\
	     pos = rcu_dereference_raw(pos->next))
/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						     \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		     \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	     \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)
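/*
 * Iteration sketch (hypothetical "struct test_obj" with a "node" member);
 * the traversal must be covered by rcu_read_lock():
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rcu_read_lock();
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node) {
 *		...
 *	}
 *	rcu_read_unlock();
 */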
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht:		hash table
 * @key:	the pointer to the key
 * @params:	hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry whose key matches, or NULL if none is found.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}
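/*
 * Lookup sketch (hypothetical key and "test_params" from above); the
 * returned object is only guaranteed to stay alive while the caller holds
 * rcu_read_lock():
 *
 *	int key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, test_params);
 *	if (obj)
 *		...;
 *	rcu_read_unlock();
 */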
/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int elasticity;
	unsigned int hash;
	int err;

restart:
	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, params);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
		if (err == -EAGAIN)
			goto slow_path;
		goto out;
	}

	if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
		spin_unlock_bh(lock);
		err = rhashtable_insert_rehash(ht);
		rcu_read_unlock();
		if (err)
			return err;

		goto restart;
	}

	err = -EEXIST;
	elasticity = ht->elasticity;
	rht_for_each(head, tbl, hash) {
		if (key &&
		    unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
		if (!--elasticity)
			goto slow_path;
	}

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}
/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * above 75% load (see rht_grow_above_75()).
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_insert_fast(ht, NULL, obj, params);
}
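/*
 * Insert sketch (hypothetical "test_obj"/"test_params" from above):
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->value = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 */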
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used on fixed-key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * above 75% load (see rht_grow_above_75()).
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}
/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht:		hash table
 * @key:	key
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * above 75% load (see rht_grow_above_75()).
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params);
}
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}
/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 * @params:	hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if the automatic_shrinking parameter
 * is enabled and residency drops below 30% (see rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (unlikely(ht->p.automatic_shrinking &&
		     rht_shrink_below_30(ht, tbl)))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}
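/*
 * Removal sketch (hypothetical "test_obj"/"test_params"); because lookups
 * may still be traversing the object, it must not be freed until an RCU
 * grace period has elapsed:
 *
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu_head);	// assumes a struct rcu_head member
 */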
#endif /* _LINUX_RHASHTABLE_H */