/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
/*
 * The end of the chain is marked with a special nulls marker, which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS           4
#define RHT_HASH_BITS           27
#define RHT_BASE_SHIFT          RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
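
/* Worked example (editorial, not part of the original header): with a
 * nulls_base of (1UL << RHT_BASE_SHIFT) and a full hash of 0x5, rht_marker()
 * below yields NULLS_MARKER((1UL << 27) + 0x5), i.e. that value shifted left
 * by one with bit 0 set. rht_is_a_nulls() then sees an odd "pointer" and
 * rht_get_nulls_value() recovers base and hash by shifting right again.
 */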
struct rhash_head {
        struct rhash_head __rcu         *next;
};
/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
        unsigned int            size;
        unsigned int            rehash;
        u32                     hash_rnd;
        unsigned int            locks_mask;
        spinlock_t              *locks;
        struct list_head        walkers;
        struct rcu_head         rcu;

        struct bucket_table __rcu *future_tbl;

        struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
        struct rhashtable *ht;
        const void *key;
};
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
                               const void *obj);

struct rhashtable;
/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @insecure_max_entries: Maximum number of entries (may be exceeded)
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
        size_t                  nelem_hint;
        size_t                  key_len;
        size_t                  key_offset;
        size_t                  head_offset;
        unsigned int            insecure_max_entries;
        unsigned int            max_size;
        unsigned int            min_size;
        u32                     nulls_base;
        bool                    insecure_elasticity;
        bool                    automatic_shrinking;
        size_t                  locks_mul;
        rht_hashfn_t            hashfn;
        rht_obj_hashfn_t        obj_hashfn;
        rht_obj_cmpfn_t         obj_cmpfn;
};
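
/* Usage sketch (editorial, not part of the original header; "struct test_obj"
 * and "test_params" are hypothetical): a caller embeds a struct rhash_head in
 * its object and describes the layout via offsets:
 *
 *      struct test_obj {
 *              int                     key;
 *              struct rhash_head       node;
 *      };
 *
 *      static const struct rhashtable_params test_params = {
 *              .key_len             = sizeof(int),
 *              .key_offset          = offsetof(struct test_obj, key),
 *              .head_offset         = offsetof(struct test_obj, node),
 *              .automatic_shrinking = true,
 *      };
 *
 * The constant parameter set is then passed to rhashtable_init() and to the
 * inlined fast-path helpers below.
 */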
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
        struct bucket_table __rcu       *tbl;
        atomic_t                        nelems;
        unsigned int                    key_len;
        unsigned int                    elasticity;
        struct rhashtable_params        p;
        struct work_struct              run_work;
        struct mutex                    mutex;
        spinlock_t                      lock;
};
/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
        struct list_head list;
        struct bucket_table *tbl;
};
/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
        struct rhashtable *ht;
        struct rhash_head *p;
        struct rhashtable_walker *walker;
        unsigned int slot;
        unsigned int skip;
};
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
        return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
        ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
        return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
        return ((unsigned long) ptr) >> 1;
}
static inline void *rht_obj(const struct rhashtable *ht,
                            const struct rhash_head *he)
{
        return (char *)he - ht->p.head_offset;
}
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
                                            unsigned int hash)
{
        return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}
static inline unsigned int rht_key_hashfn(
        struct rhashtable *ht, const struct bucket_table *tbl,
        const void *key, const struct rhashtable_params params)
{
        unsigned int hash;

        /* params must be equal to ht->p if it isn't constant. */
        if (!__builtin_constant_p(params.key_len))
                hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
        else if (params.key_len) {
                unsigned int key_len = params.key_len;

                if (params.hashfn)
                        hash = params.hashfn(key, key_len, tbl->hash_rnd);
                else if (key_len & (sizeof(u32) - 1))
                        hash = jhash(key, key_len, tbl->hash_rnd);
                else
                        hash = jhash2(key, key_len / sizeof(u32),
                                      tbl->hash_rnd);
        } else {
                unsigned int key_len = ht->p.key_len;

                if (params.hashfn)
                        hash = params.hashfn(key, key_len, tbl->hash_rnd);
                else
                        hash = jhash(key, key_len, tbl->hash_rnd);
        }

        return rht_bucket_index(tbl, hash);
}
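
/* Note (editorial sketch, not from the original header): because @params is
 * passed by value and is normally a compile-time constant (e.g. the
 * hypothetical "test_params" above), __builtin_constant_p() lets the
 * compiler discard all but one branch. For a constant key_len of 8, the
 * whole function typically reduces to a single jhash2() call followed by
 * rht_bucket_index().
 */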
static inline unsigned int rht_head_hashfn(
        struct rhashtable *ht, const struct bucket_table *tbl,
        const struct rhash_head *he, const struct rhashtable_params params)
{
        const char *ptr = rht_obj(ht, he);

        return likely(params.obj_hashfn) ?
               rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
                                                       ht->p.key_len,
                                                       tbl->hash_rnd)) :
               rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}
/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
                                     const struct bucket_table *tbl)
{
        /* Expand table when exceeding 75% load */
        return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
               (!ht->p.max_size || tbl->size < ht->p.max_size);
}
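
/* Worked example (editorial, not from the original header): for a table of
 * size 1024, tbl->size / 4 * 3 = 768, so growth is triggered by the
 * insertion that takes the table to 769 entries, well before
 * rht_grow_above_100() below would fire at 1025.
 */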
/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
                                       const struct bucket_table *tbl)
{
        /* Shrink table beneath 30% load */
        return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
               tbl->size > ht->p.min_size;
}
/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
                                      const struct bucket_table *tbl)
{
        return atomic_read(&ht->nelems) > tbl->size &&
               (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/**
 * rht_grow_above_max - returns true if table is above maximum
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_max(const struct rhashtable *ht,
                                      const struct bucket_table *tbl)
{
        return ht->p.insecure_max_entries &&
               atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
}
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking, as locking the bucket in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
                                          unsigned int hash)
{
        return &tbl->locks[hash & tbl->locks_mask];
}
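
/* Worked example (editorial, not from the original header): when a table
 * grows from 8 to 16 buckets, entries from old bucket 1 are split between
 * new buckets 1 and 9. With at most 16/2 = 8 locks on the new table,
 * (1 & locks_mask) == (9 & locks_mask), so both destination buckets are
 * always covered by a single lock.
 */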
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
                                             u32 hash)
{
        return 1;
}
#endif /* CONFIG_PROVE_LOCKING */
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params);

struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
                                            const void *key,
                                            struct rhash_head *obj,
                                            struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);
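
/* Usage sketch (editorial, not part of the original header; "my_ht" and
 * "struct test_obj" are hypothetical): a typical walk handles the -EAGAIN
 * that start/next may report when a resize interrupts the walk:
 *
 *      struct rhashtable_iter iter;
 *      struct test_obj *obj;
 *      int err;
 *
 *      err = rhashtable_walk_init(&my_ht, &iter);
 *      if (err)
 *              return err;
 *
 *      err = rhashtable_walk_start(&iter);
 *      if (err && err != -EAGAIN)
 *              goto out;
 *
 *      while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *              if (IS_ERR(obj)) {
 *                      if (PTR_ERR(obj) == -EAGAIN)
 *                              continue;
 *                      break;
 *              }
 *              ...process obj...
 *      }
 *
 *      rhashtable_walk_stop(&iter);
 * out:
 *      rhashtable_walk_exit(&iter);
 */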
void rhashtable_free_and_destroy(struct rhashtable *ht,
                                 void (*free_fn)(void *ptr, void *arg),
                                 void *arg);
void rhashtable_destroy(struct rhashtable *ht);
#define rht_dereference(p, ht) \
        rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
        rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
        rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
        rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
        ({ tpos = container_of(pos, typeof(*tpos), member); 1; })
/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
        for (pos = rht_dereference_bucket(head, tbl, hash); \
             !rht_is_a_nulls(pos); \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
        rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
        for (pos = rht_dereference_bucket(head, tbl, hash); \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
        rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash], \
                                    tbl, hash, member)
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
        for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
             next = !rht_is_a_nulls(pos) ? \
                    rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = next, \
             next = !rht_is_a_nulls(pos) ? \
                    rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
        for (({barrier(); }), \
             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
             !rht_is_a_nulls(pos); \
             pos = rcu_dereference_raw(pos->next))
/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash) \
        rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
        for (({barrier(); }), \
             pos = rht_dereference_bucket_rcu(head, tbl, hash); \
             (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
             pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
        rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash], \
                                        tbl, hash, member)
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
                                     const void *obj)
{
        struct rhashtable *ht = arg->ht;
        const char *ptr = obj;

        return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
        struct rhashtable *ht, const void *key,
        const struct rhashtable_params params)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        const struct bucket_table *tbl;
        struct rhash_head *he;
        unsigned int hash;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
        hash = rht_key_hashfn(ht, tbl, key, params);
        rht_for_each_rcu(he, tbl, hash) {
                if (params.obj_cmpfn ?
                    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
                    rhashtable_compare(&arg, rht_obj(ht, he)))
                        continue;
                rcu_read_unlock();
                return rht_obj(ht, he);
        }

        /* Ensure we see any new tables. */
        smp_rmb();

        tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (unlikely(tbl))
                goto restart;
        rcu_read_unlock();

        return NULL;
}
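
/* Usage sketch (editorial, not part of the original header; "my_ht",
 * "test_params" and "struct test_obj" are hypothetical):
 *
 *      int key = 42;
 *      struct test_obj *obj;
 *
 *      rcu_read_lock();
 *      obj = rhashtable_lookup_fast(&my_ht, &key, test_params);
 *      if (obj)
 *              ...use obj inside the RCU read-side section...
 *      rcu_read_unlock();
 *
 * The caller must hold rcu_read_lock() around both the lookup and any use of
 * the returned object, since a removed entry may be freed after a grace
 * period.
 */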
/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
        struct rhashtable *ht, const void *key, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        struct rhashtable_compare_arg arg = {
                .ht = ht,
                .key = key,
        };
        struct bucket_table *tbl, *new_tbl;
        struct rhash_head *head;
        spinlock_t *lock;
        unsigned int elasticity;
        unsigned int hash;
        int err;

restart:
        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);

        /* All insertions must grab the oldest table containing
         * the hashed bucket that is yet to be rehashed.
         */
        for (;;) {
                hash = rht_head_hashfn(ht, tbl, obj, params);
                lock = rht_bucket_lock(tbl, hash);
                spin_lock_bh(lock);

                if (tbl->rehash <= hash)
                        break;

                spin_unlock_bh(lock);
                tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        }

        new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (unlikely(new_tbl)) {
                tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
                if (!IS_ERR_OR_NULL(tbl))
                        goto slow_path;

                err = PTR_ERR(tbl);
                goto out;
        }

        err = -E2BIG;
        if (unlikely(rht_grow_above_max(ht, tbl)))
                goto out;

        if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
                spin_unlock_bh(lock);
                err = rhashtable_insert_rehash(ht, tbl);
                rcu_read_unlock();
                if (err)
                        return err;

                goto restart;
        }

        err = -EEXIST;
        elasticity = ht->elasticity;
        rht_for_each(head, tbl, hash) {
                if (key &&
                    unlikely(!(params.obj_cmpfn ?
                               params.obj_cmpfn(&arg, rht_obj(ht, head)) :
                               rhashtable_compare(&arg, rht_obj(ht, head)))))
                        goto out;
                if (!--elasticity)
                        goto slow_path;
        }

        err = 0;

        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);
        if (rht_grow_above_75(ht, tbl))
                schedule_work(&ht->run_work);

out:
        spin_unlock_bh(lock);
        rcu_read_unlock();

        return err;
}
/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by rht_grow_above_75().
 */
static inline int rhashtable_insert_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        return __rhashtable_insert_fast(ht, NULL, obj, params);
}
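
/* Usage sketch (editorial, not part of the original header; names match the
 * hypothetical test_obj example above):
 *
 *      struct test_obj *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);
 *
 *      if (!obj)
 *              return -ENOMEM;
 *      obj->key = 42;
 *      return rhashtable_insert_fast(&my_ht, &obj->node, test_params);
 *
 * Note that rhashtable_insert_fast() performs no duplicate check; use
 * rhashtable_lookup_insert_fast() below when keys must be unique.
 */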
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash table (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by rht_grow_above_75().
 */
static inline int rhashtable_lookup_insert_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        const char *key = rht_obj(ht, obj);

        BUG_ON(ht->p.obj_hashfn);

        return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
                                        params);
}
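
/* Editorial note: when an entry with the same key already exists,
 * __rhashtable_insert_fast() above leaves the table unchanged and, in this
 * version of the code, returns -EEXIST, so callers can treat a non-zero
 * return as "not inserted" and free or reuse their object.
 */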
/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *                                with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by rht_grow_above_75().
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
        struct rhashtable *ht, const void *key, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        BUG_ON(!ht->p.obj_hashfn || !key);

        return __rhashtable_insert_fast(ht, key, obj, params);
}
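
/* Editorial note: this variant is for tables keyed via an obj_hashfn (for
 * example variable-length keys). The key passed in must hash and compare
 * consistently with the object being inserted, otherwise the duplicate
 * search and the insertion could target different buckets.
 */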
/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
        struct rhashtable *ht, struct bucket_table *tbl,
        struct rhash_head *obj, const struct rhashtable_params params)
{
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
        spinlock_t *lock;
        unsigned int hash;
        int err = -ENOENT;

        hash = rht_head_hashfn(ht, tbl, obj, params);
        lock = rht_bucket_lock(tbl, hash);

        spin_lock_bh(lock);

        pprev = &tbl->buckets[hash];
        rht_for_each(he, tbl, hash) {
                if (he != obj) {
                        pprev = &he->next;
                        continue;
                }

                rcu_assign_pointer(*pprev, obj->next);
                err = 0;
                break;
        }

        spin_unlock_bh(lock);

        return err;
}
/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is single linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slow if the hash table is not correctly sized.
 *
 * Will automatically shrink the table if the automatic_shrinking option
 * is set and the table load drops below 30%.
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
        struct rhashtable *ht, struct rhash_head *obj,
        const struct rhashtable_params params)
{
        struct bucket_table *tbl;
        int err;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);

        /* Because we have already taken (and released) the bucket
         * lock in old_tbl, if we find that future_tbl is not yet
         * visible then that guarantees the entry to still be in
         * the old tbl if it exists.
         */
        while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
               (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
                ;

        if (err)
                goto out;

        atomic_dec(&ht->nelems);
        if (unlikely(ht->p.automatic_shrinking &&
                     rht_shrink_below_30(ht, tbl)))
                schedule_work(&ht->run_work);

out:
        rcu_read_unlock();

        return err;
}
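
/* Usage sketch (editorial, not part of the original header; names are
 * hypothetical): removal does not free the object, and concurrent readers
 * may still hold RCU references to it, so defer the free past a grace
 * period:
 *
 *      if (rhashtable_remove_fast(&my_ht, &obj->node, test_params) == 0)
 *              kfree_rcu(obj, rcu);
 *
 * where "rcu" is a struct rcu_head embedded in the hypothetical test_obj.
 */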
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
        struct rhashtable *ht, struct bucket_table *tbl,
        struct rhash_head *obj_old, struct rhash_head *obj_new,
        const struct rhashtable_params params)
{
        struct rhash_head __rcu **pprev;
        struct rhash_head *he;
        spinlock_t *lock;
        unsigned int hash;
        int err = -ENOENT;

        /* Minimally, the old and new objects must have same hash
         * (which should mean identifiers are the same).
         */
        hash = rht_head_hashfn(ht, tbl, obj_old, params);
        if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
                return -EINVAL;

        lock = rht_bucket_lock(tbl, hash);

        spin_lock_bh(lock);

        pprev = &tbl->buckets[hash];
        rht_for_each(he, tbl, hash) {
                if (he != obj_old) {
                        pprev = &he->next;
                        continue;
                }

                rcu_assign_pointer(obj_new->next, obj_old->next);
                rcu_assign_pointer(*pprev, obj_new);
                err = 0;
                break;
        }

        spin_unlock_bh(lock);

        return err;
}
/**
 * rhashtable_replace_fast - replace an object in hash table
 * @ht: hash table
 * @obj_old: pointer to hash head inside object being replaced
 * @obj_new: pointer to hash head inside object which is new
 * @params: hash table parameters
 *
 * Replacing an object doesn't affect the number of elements in the hash table
 * or bucket, so we don't need to worry about shrinking or expanding the
 * table here.
 *
 * Returns zero on success, -ENOENT if the entry could not be found,
 * -EINVAL if hash is not the same for the old and new objects.
 */
static inline int rhashtable_replace_fast(
        struct rhashtable *ht, struct rhash_head *obj_old,
        struct rhash_head *obj_new,
        const struct rhashtable_params params)
{
        struct bucket_table *tbl;
        int err;

        rcu_read_lock();

        tbl = rht_dereference_rcu(ht->tbl, ht);

        /* Because we have already taken (and released) the bucket
         * lock in old_tbl, if we find that future_tbl is not yet
         * visible then that guarantees the entry to still be in
         * the old tbl if it exists.
         */
        while ((err = __rhashtable_replace_fast(ht, tbl, obj_old,
                                                obj_new, params)) &&
               (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
                ;

        rcu_read_unlock();

        return err;
}
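
/* Usage sketch (editorial, not part of the original header; names are
 * hypothetical): replacement atomically swaps the list linkage, so readers
 * see either the old or the new object, never a broken chain:
 *
 *      new_obj->key = old_obj->key;    (same key, hence same hash)
 *      err = rhashtable_replace_fast(&my_ht, &old_obj->node,
 *                                    &new_obj->node, test_params);
 *      if (!err)
 *              kfree_rcu(old_obj, rcu);
 */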
#endif /* _LINUX_RHASHTABLE_H */