/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

/*
 * The end of the chain is marked with a special nulls marker which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                        Hash                         |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS

/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

struct rhash_head {
	struct rhash_head __rcu		*next;
};

/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @rehash: Current bucket being rehashed
 * @hash_rnd: Random seed to fold into hash
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @future_tbl: Table under construction during rehashing
 * @buckets: size * hash buckets
 */
struct bucket_table {
	unsigned int		size;
	unsigned int		rehash;
	u32			hash_rnd;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct bucket_table __rcu *future_tbl;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};

/**
 * struct rhashtable_compare_arg - Key for the function rhashtable_compare
 * @ht: Hash table
 * @key: Key to compare against
 */
struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
			       const void *obj);

struct rhashtable;

/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_size: Maximum size while expanding
 * @min_size: Minimum size while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @insecure_elasticity: Set to true to disable chain length checks
 * @automatic_shrinking: Enable automatic shrinking of tables
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
 * @obj_hashfn: Function to hash object
 * @obj_cmpfn: Function to compare key with object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	unsigned int		max_size;
	unsigned int		min_size;
	u32			nulls_base;
	bool			insecure_elasticity;
	bool			automatic_shrinking;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
	rht_obj_cmpfn_t		obj_cmpfn;
};

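/*
 * A minimal parameter sketch for a hashable object. The struct and its
 * field names are hypothetical, purely for illustration:
 *
 *	struct test_obj {
 *		u32			key;
 *		struct rhash_head	node;
 *	};
 *
 *	static const struct rhashtable_params test_params = {
 *		.key_len	= sizeof(u32),
 *		.key_offset	= offsetof(struct test_obj, key),
 *		.head_offset	= offsetof(struct test_obj, node),
 *		.automatic_shrinking = true,
 *	};
 */
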
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @nelems: Number of elements in table
 * @key_len: Key length for hashfn
 * @elasticity: Maximum chain length before rehash
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 * @lock: Spin lock to protect walker list
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	atomic_t			nelems;
	unsigned int			key_len;
	unsigned int			elasticity;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
	spinlock_t			lock;
};

/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head list;
	struct bucket_table *tbl;
};

/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable *ht;
	struct rhash_head *p;
	struct rhashtable_walker *walker;
	unsigned int slot;
	unsigned int skip;
};

static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))

static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr & 1);
}

static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	return ((unsigned long) ptr) >> 1;
}

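/*
 * A worked sketch of the nulls round trip, assuming
 * NULLS_MARKER(v) == ((v << 1) | 1) from <linux/list_nulls.h>:
 *
 *	struct rhash_head *head;
 *
 *	INIT_RHT_NULLS_HEAD(head, ht, hash);
 *	rht_is_a_nulls(head);		// true: bit 0 is the nulls bit
 *	rht_get_nulls_value(head);	// yields ht->p.nulls_base + hash
 *
 * A real pointer is at least word aligned, so a set bit 0 can never be
 * confused with a valid &struct rhash_head address.
 */
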
static inline void *rht_obj(const struct rhashtable *ht,
			    const struct rhash_head *he)
{
	return (char *)he - ht->p.head_offset;
}

static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
					    unsigned int hash)
{
	return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
}

static inline unsigned int rht_key_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const void *key, const struct rhashtable_params params)
{
	unsigned int hash;

	/* params must be equal to ht->p if it isn't constant. */
	if (!__builtin_constant_p(params.key_len))
		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
	else if (params.key_len) {
		unsigned int key_len = params.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else if (key_len & (sizeof(u32) - 1))
			hash = jhash(key, key_len, tbl->hash_rnd);
		else
			hash = jhash2(key, key_len / sizeof(u32),
				      tbl->hash_rnd);
	} else {
		unsigned int key_len = ht->p.key_len;

		if (params.hashfn)
			hash = params.hashfn(key, key_len, tbl->hash_rnd);
		else
			hash = jhash(key, key_len, tbl->hash_rnd);
	}

	return rht_bucket_index(tbl, hash);
}

static inline unsigned int rht_head_hashfn(
	struct rhashtable *ht, const struct bucket_table *tbl,
	const struct rhash_head *he, const struct rhashtable_params params)
{
	const char *ptr = rht_obj(ht, he);

	return likely(params.obj_hashfn) ?
	       rht_bucket_index(tbl, params.obj_hashfn(ptr, params.key_len ?:
							    ht->p.key_len,
						       tbl->hash_rnd)) :
	       rht_key_hashfn(ht, tbl, ptr + params.key_offset, params);
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_75(const struct rhashtable *ht,
				     const struct bucket_table *tbl)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (tbl->size / 4 * 3) &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_shrink_below_30(const struct rhashtable *ht,
				       const struct bucket_table *tbl)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (tbl->size * 3 / 10) &&
	       tbl->size > ht->p.min_size;
}

/**
 * rht_grow_above_100 - returns true if nelems > table-size
 * @ht: hash table
 * @tbl: current table
 */
static inline bool rht_grow_above_100(const struct rhashtable *ht,
				      const struct bucket_table *tbl)
{
	return atomic_read(&ht->nelems) > tbl->size &&
	       (!ht->p.max_size || tbl->size < ht->p.max_size);
}

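/*
 * Worked example of the watermarks above for a table of size 1024:
 * deferred growth is scheduled once nelems exceeds 1024 / 4 * 3 = 768
 * (75%), an immediate rehash is forced above 1024 (100%), and shrinking
 * is considered once nelems drops below 1024 * 3 / 10 = 307 (30%).
 */
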
/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
 * a single lock always covers both buckets which may both contain
 * entries which link to the same bucket of the old table during resizing.
 * This simplifies the locking as taking the bucket lock in both tables
 * during a resize always guarantees protection.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
					  unsigned int hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */

int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params);

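/*
 * A minimal init/teardown sketch, reusing the hypothetical test_params
 * from the example above:
 *
 *	struct rhashtable ht;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &test_params);
 *	if (err)
 *		return err;
 *	...
 *	rhashtable_destroy(&ht);
 */
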
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *old_tbl);
int rhashtable_insert_rehash(struct rhashtable *ht);

int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

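/*
 * A walk sketch over the hypothetical test_obj entries. -EAGAIN from
 * rhashtable_walk_start() or rhashtable_walk_next() signals that a
 * resize was caught mid-walk, in which case entries may be seen more
 * than once; the handling below is deliberately abbreviated:
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *
 *	err = rhashtable_walk_init(&ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto exit;
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj))
 *			continue;	// -EAGAIN: walk restarted
 *		...
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 * exit:
 *	rhashtable_walk_exit(&iter);
 */
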
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg);
void rhashtable_destroy(struct rhashtable *ht);

#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })

/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash);		 \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	 \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)

/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @next: the &struct rhash_head to use as next in loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	    \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = next,						    \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)

/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)				\
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)

/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @head: the previous &struct rhash_head to continue from
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct rhash_head to use as a loop cursor.
 * @tbl: the &struct bucket_table
 * @hash: the hash value / bucket index
 * @member: name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)

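/*
 * A bucket-scan sketch using rht_for_each_entry_rcu with the
 * hypothetical test_obj; the caller must hold rcu_read_lock():
 *
 *	struct test_obj *obj;
 *	struct rhash_head *pos;
 *
 *	rht_for_each_entry_rcu(obj, pos, tbl, hash, node)
 *		pr_info("key %u\n", obj->key);
 */
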
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
				     const void *obj)
{
	struct rhashtable *ht = arg->ht;
	const char *ptr = obj;

	return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}

/**
 * rhashtable_lookup_fast - search hash table, inlined version
 * @ht: hash table
 * @key: the pointer to the key
 * @params: hash table parameters
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * Returns the first entry on which the compare function returned true.
 */
static inline void *rhashtable_lookup_fast(
	struct rhashtable *ht, const void *key,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	const struct bucket_table *tbl;
	struct rhash_head *he;
	unsigned int hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
	hash = rht_key_hashfn(ht, tbl, key, params);
	rht_for_each_rcu(he, tbl, hash) {
		if (params.obj_cmpfn ?
		    params.obj_cmpfn(&arg, rht_obj(ht, he)) :
		    rhashtable_compare(&arg, rht_obj(ht, he)))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(tbl))
		goto restart;
	rcu_read_unlock();

	return NULL;
}

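/*
 * A lookup sketch for the hypothetical test_obj table. The function
 * drops its internal rcu_read_lock() before returning, so the caller
 * should hold rcu_read_lock() across the call and any use of the
 * returned object:
 *
 *	u32 key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_fast(&ht, &key, test_params);
 *	if (obj)
 *		...
 *	rcu_read_unlock();
 */
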
/* Internal function, please use rhashtable_insert_fast() instead */
static inline int __rhashtable_insert_fast(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};
	struct bucket_table *tbl, *new_tbl;
	struct rhash_head *head;
	spinlock_t *lock;
	unsigned int elasticity;
	unsigned int hash;
	int err;

restart:
	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* All insertions must grab the oldest table containing
	 * the hashed bucket that is yet to be rehashed.
	 */
	for (;;) {
		hash = rht_head_hashfn(ht, tbl, obj, params);
		lock = rht_bucket_lock(tbl, hash);
		spin_lock_bh(lock);

		if (tbl->rehash <= hash)
			break;

		spin_unlock_bh(lock);
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	}

	new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (unlikely(new_tbl)) {
		err = rhashtable_insert_slow(ht, key, obj, new_tbl);
		if (err == -EAGAIN)
			goto slow_path;
		goto out;
	}

	if (unlikely(rht_grow_above_100(ht, tbl))) {
slow_path:
		spin_unlock_bh(lock);
		err = rhashtable_insert_rehash(ht);
		rcu_read_unlock();
		if (err)
			return err;

		goto restart;
	}

	err = -EEXIST;
	elasticity = ht->elasticity;
	rht_for_each(head, tbl, hash) {
		if (key &&
		    unlikely(!(params.obj_cmpfn ?
			       params.obj_cmpfn(&arg, rht_obj(ht, head)) :
			       rhashtable_compare(&arg, rht_obj(ht, head)))))
			goto out;
		if (!--elasticity)
			goto slow_path;
	}

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);
	if (rht_grow_above_75(ht, tbl))
		schedule_work(&ht->run_work);

out:
	spin_unlock_bh(lock);
	rcu_read_unlock();

	return err;
}

/**
 * rhashtable_insert_fast - insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Will take a per-bucket spinlock to protect against mutual mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 */
static inline int rhashtable_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	return __rhashtable_insert_fast(ht, NULL, obj, params);
}

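/*
 * An insertion sketch for the hypothetical test_obj:
 *
 *	struct test_obj *obj = kmalloc(sizeof(*obj), GFP_ATOMIC);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	err = rhashtable_insert_fast(&ht, &obj->node, test_params);
 *	if (err)
 *		kfree(obj);
 */
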
/**
 * rhashtable_lookup_insert_fast - lookup and insert object into hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for a fixed-key hash table (the
 * @key_len parameter is set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 */
static inline int rhashtable_lookup_insert_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	const char *key = rht_obj(ht, obj);

	BUG_ON(ht->p.obj_hashfn);

	return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
					params);
}

/**
 * rhashtable_lookup_insert_key - search and insert object to hash table
 *				  with explicit key
 * @ht: hash table
 * @key: key
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the 75% load watermark (see rht_grow_above_75()).
 *
 * Returns zero on success.
 */
static inline int rhashtable_lookup_insert_key(
	struct rhashtable *ht, const void *key, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	BUG_ON(!ht->p.obj_hashfn || !key);

	return __rhashtable_insert_fast(ht, key, obj, params);
}

/* Internal function, please use rhashtable_remove_fast() instead */
static inline int __rhashtable_remove_fast(
	struct rhashtable *ht, struct bucket_table *tbl,
	struct rhash_head *obj, const struct rhashtable_params params)
{
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;
	int err = -ENOENT;

	hash = rht_head_hashfn(ht, tbl, obj, params);
	lock = rht_bucket_lock(tbl, hash);

	spin_lock_bh(lock);

	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		err = 0;
		break;
	}

	spin_unlock_bh(lock);

	return err;
}

/**
 * rhashtable_remove_fast - remove object from hash table
 * @ht: hash table
 * @obj: pointer to hash head inside object
 * @params: hash table parameters
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will trigger an automatic deferred table shrinking if @automatic_shrinking
 * was enabled at rhashtable_init() and the load drops below 30% (see
 * rht_shrink_below_30()).
 *
 * Returns zero on success, -ENOENT if the entry could not be found.
 */
static inline int rhashtable_remove_fast(
	struct rhashtable *ht, struct rhash_head *obj,
	const struct rhashtable_params params)
{
	struct bucket_table *tbl;
	int err;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->tbl, ht);

	/* Because we have already taken (and released) the bucket
	 * lock in old_tbl, if we find that future_tbl is not yet
	 * visible then that guarantees the entry to still be in
	 * the old tbl if it exists.
	 */
	while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
	       (tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
		;

	if (err)
		goto out;

	atomic_dec(&ht->nelems);
	if (unlikely(ht->p.automatic_shrinking &&
		     rht_shrink_below_30(ht, tbl)))
		schedule_work(&ht->run_work);

out:
	rcu_read_unlock();

	return err;
}

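/*
 * A removal sketch for the hypothetical test_obj. The object must only
 * be freed after an RCU grace period, since concurrent readers may
 * still be traversing it:
 *
 *	err = rhashtable_remove_fast(&ht, &obj->node, test_params);
 *	if (!err)
 *		kfree_rcu(obj, rcu);	// assumes a struct rcu_head field
 */
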
#endif /* _LINUX_RHASHTABLE_H */