/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
			const struct bucket_table *tbl,
			const struct rhash_head *he)
{
        return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
        return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
        spinlock_t *lock = rht_bucket_lock(tbl, hash);

        return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
        unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
        unsigned int nr_pcpus = 2;
#else
        unsigned int nr_pcpus = num_possible_cpus();
#endif

        nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
        size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

        /* Never allocate more than 0.5 locks per bucket */
        size = min_t(unsigned int, size, tbl->size >> 1);

        if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
                if (size * sizeof(spinlock_t) > PAGE_SIZE)
                        tbl->locks = vmalloc(size * sizeof(spinlock_t));
                else
#endif
                        tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
                                                   GFP_KERNEL);
                if (!tbl->locks)
                        return -ENOMEM;
                for (i = 0; i < size; i++)
                        spin_lock_init(&tbl->locks[i]);
        }
        tbl->locks_mask = size - 1;

        return 0;
}
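/* Worked example of the lock sizing above (illustrative numbers only): with
 * 4 possible CPUs and the default locks_mul of 128, size starts out as
 * roundup_pow_of_two(4 * 128) = 512 and is then capped at tbl->size / 2,
 * i.e. 32 locks for a 64-bucket table.
 */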

static void bucket_table_free(const struct bucket_table *tbl)
{
        if (tbl)
                kvfree(tbl->locks);

        kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
        bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
                                               size_t nbuckets)
{
        struct bucket_table *tbl = NULL;
        size_t size;
        int i;

        size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
                tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
        if (tbl == NULL)
                tbl = vzalloc(size);
        if (tbl == NULL)
                return NULL;

        tbl->size = nbuckets;

        if (alloc_bucket_locks(ht, tbl) < 0) {
                bucket_table_free(tbl);
                return NULL;
        }

        INIT_LIST_HEAD(&tbl->walkers);

        get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

        for (i = 0; i < nbuckets; i++)
                INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

        return tbl;
}

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct bucket_table *new_tbl =
                rht_dereference(old_tbl->future_tbl, ht) ?: old_tbl;
        struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
        int err = -ENOENT;
        struct rhash_head *head, *next, *entry;
        spinlock_t *new_bucket_lock;
        unsigned new_hash;

        rht_for_each(entry, old_tbl, old_hash) {
                err = 0;
                next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

                if (rht_is_a_nulls(next))
                        break;

                pprev = &entry->next;
        }

        if (err)
                goto out;

        new_hash = head_hashfn(ht, new_tbl, entry);

        new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

        spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
        head = rht_dereference_bucket(new_tbl->buckets[new_hash],
                                      new_tbl, new_hash);

        if (rht_is_a_nulls(head))
                INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
        else
                RCU_INIT_POINTER(entry->next, head);

        rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
        spin_unlock(new_bucket_lock);

        rcu_assign_pointer(*pprev, next);

out:
        return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        spinlock_t *old_bucket_lock;

        old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

        spin_lock_bh(old_bucket_lock);
        while (!rhashtable_rehash_one(ht, old_hash))
                ;
        old_tbl->rehash++;
        spin_unlock_bh(old_bucket_lock);
}

static void rhashtable_rehash(struct rhashtable *ht,
                              struct bucket_table *new_tbl)
{
        struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
        struct rhashtable_walker *walker;
        unsigned old_hash;

        /* Make insertions go into the new, empty table right away. Deletions
         * and lookups will be attempted in both tables until we synchronize.
         */
        rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

        /* Ensure the new table is visible to readers. */
        smp_wmb();

        for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
                rhashtable_rehash_chain(ht, old_hash);

        /* Publish the new table pointer. */
        rcu_assign_pointer(ht->tbl, new_tbl);

        list_for_each_entry(walker, &old_tbl->walkers, list)
                walker->tbl = NULL;

        /* Wait for readers. All new readers will see the new
         * table, and thus no references to the old table will
         * remain.
         */
        call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht: the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
        if (new_tbl == NULL)
                return -ENOMEM;

        rhashtable_rehash(ht, new_tbl);
        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht: the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
        struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);

        ASSERT_RHT_MUTEX(ht);

        new_tbl = bucket_table_alloc(ht, old_tbl->size / 2);
        if (new_tbl == NULL)
                return -ENOMEM;

        rhashtable_rehash(ht, new_tbl);
        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);
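/* Illustrative sketch (not from this file): a caller that wants to force a
 * resize, rather than rely on the deferred worker below, must do so under
 * ht->mutex and outside of any RCU read-side critical section:
 *
 *      mutex_lock(&ht->mutex);
 *      err = rhashtable_expand(ht);
 *      mutex_unlock(&ht->mutex);
 *
 * rhashtable_shrink() is driven the same way; both return 0 on success or
 * -ENOMEM if the new bucket table cannot be allocated.
 */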

static void rht_deferred_worker(struct work_struct *work)
{
        struct rhashtable *ht;
        struct bucket_table *tbl;

        ht = container_of(work, struct rhashtable, run_work);
        mutex_lock(&ht->mutex);
        if (ht->being_destroyed)
                goto unlock;

        tbl = rht_dereference(ht->tbl, ht);

        if (rht_grow_above_75(ht, tbl))
                rhashtable_expand(ht);
        else if (rht_shrink_below_30(ht, tbl))
                rhashtable_shrink(ht);
unlock:
        mutex_unlock(&ht->mutex);
}

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
                           struct rhash_head *obj,
                           struct bucket_table *tbl)
{
        struct rhash_head *head;
        unsigned hash;
        int err = -EEXIST;

        hash = head_hashfn(ht, tbl, obj);
        spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

        if (key && rhashtable_lookup_fast(ht, key, ht->p))
                goto exit;

        err = 0;

        head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

        RCU_INIT_POINTER(obj->next, head);

        rcu_assign_pointer(tbl->buckets[hash], obj);

        atomic_inc(&ht->nelems);

exit:
        spin_unlock(rht_bucket_lock(tbl, hash));

        return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht: Table to walk over
 * @iter: Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
        iter->ht = ht;
        iter->p = NULL;
        iter->slot = 0;
        iter->skip = 0;

        iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
        if (!iter->walker)
                return -ENOMEM;

        mutex_lock(&ht->mutex);
        iter->walker->tbl = rht_dereference(ht->tbl, ht);
        list_add(&iter->walker->list, &iter->walker->tbl->walkers);
        mutex_unlock(&ht->mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter: Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
        mutex_lock(&iter->ht->mutex);
        if (iter->walker->tbl)
                list_del(&iter->walker->list);
        mutex_unlock(&iter->ht->mutex);
        kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter: Hash table iterator
 *
 * Start a hash table walk. Note that we take the RCU lock in all
 * cases including when we return an error. So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
        __acquires(RCU)
{
        struct rhashtable *ht = iter->ht;

        mutex_lock(&ht->mutex);

        if (iter->walker->tbl)
                list_del(&iter->walker->list);

        rcu_read_lock();

        mutex_unlock(&ht->mutex);

        if (!iter->walker->tbl) {
                iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
                return -EAGAIN;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter: Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred. Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
        struct bucket_table *tbl = iter->walker->tbl;
        struct rhashtable *ht = iter->ht;
        struct rhash_head *p = iter->p;
        void *obj = NULL;

        if (p) {
                p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
                goto next;
        }

        for (; iter->slot < tbl->size; iter->slot++) {
                int skip = iter->skip;

                rht_for_each_rcu(p, tbl, iter->slot) {
                        if (!skip)
                                break;
                        skip--;
                }

next:
                if (!rht_is_a_nulls(p)) {
                        iter->skip++;
                        iter->p = p;
                        obj = rht_obj(ht, p);
                        goto out;
                }

                iter->skip = 0;
        }

        iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
        if (iter->walker->tbl) {
                iter->slot = 0;
                iter->skip = 0;
                return ERR_PTR(-EAGAIN);
        }

        iter->p = NULL;

out:

        return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter: Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
        __releases(RCU)
{
        struct rhashtable *ht;
        struct bucket_table *tbl = iter->walker->tbl;

        if (!tbl)
                goto out;

        ht = iter->ht;

        mutex_lock(&ht->mutex);
        if (tbl->rehash < tbl->size)
                list_add(&iter->walker->list, &tbl->walkers);
        else
                iter->walker->tbl = NULL;
        mutex_unlock(&ht->mutex);

        iter->p = NULL;

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
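/* Illustrative sketch of a complete walk. The table "my_ht", the element
 * type "struct test_obj" and the process() helper are placeholders for the
 * example, not defined in this file. ERR_PTR(-EAGAIN) from
 * rhashtable_walk_next() only means the iterator rewound after a resize,
 * so the loop simply continues:
 *
 *      struct rhashtable_iter iter;
 *      struct test_obj *obj;
 *      int err;
 *
 *      err = rhashtable_walk_init(&my_ht, &iter);
 *      if (err)
 *              return err;
 *
 *      rhashtable_walk_start(&iter);   (-EAGAIN here just means "rewound")
 *
 *      while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *              if (IS_ERR(obj)) {
 *                      if (PTR_ERR(obj) == -EAGAIN)
 *                              continue;
 *                      break;
 *              }
 *              process(obj);
 *      }
 *
 *      rhashtable_walk_stop(&iter);
 *      rhashtable_walk_exit(&iter);
 */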

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
        return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
                   (unsigned long)params->min_size);
}
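/* For example (illustrative numbers): rounded_hashtable_size() with a
 * nelem_hint of 100 gives roundup_pow_of_two(100 * 4 / 3) =
 * roundup_pow_of_two(133) = 256 buckets, unless params->min_size is larger.
 */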

/**
 * rhashtable_init - initialize a new hash table
 * @ht: hash table to be initialized
 * @params: configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *      int                     key;
 *      void *                  my_member;
 *      struct rhash_head       node;
 * };
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .key_offset = offsetof(struct test_obj, key),
 *      .key_len = sizeof(int),
 *      .hashfn = jhash,
 *      .nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *      [...]
 *      struct rhash_head       node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *      struct test_obj *obj = data;
 *
 *      return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *      .head_offset = offsetof(struct test_obj, node),
 *      .hashfn = jhash,
 *      .obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
                    const struct rhashtable_params *params)
{
        struct bucket_table *tbl;
        size_t size;

        size = HASH_DEFAULT_SIZE;

        if ((!(params->key_len && params->hashfn) && !params->obj_hashfn) ||
            (params->obj_hashfn && !params->obj_cmpfn))
                return -EINVAL;

        if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
                return -EINVAL;

        if (params->nelem_hint)
                size = rounded_hashtable_size(params);

        memset(ht, 0, sizeof(*ht));
        mutex_init(&ht->mutex);
        memcpy(&ht->p, params, sizeof(*params));

        if (params->min_size)
                ht->p.min_size = roundup_pow_of_two(params->min_size);

        if (params->max_size)
                ht->p.max_size = rounddown_pow_of_two(params->max_size);

        ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

        if (params->locks_mul)
                ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
        else
                ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

        tbl = bucket_table_alloc(ht, size);
        if (tbl == NULL)
                return -ENOMEM;

        atomic_set(&ht->nelems, 0);

        RCU_INIT_POINTER(ht->tbl, tbl);

        INIT_WORK(&ht->run_work, rht_deferred_worker);

        return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
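/* Illustrative lifecycle sketch using the Example 1 parameters above.
 * It assumes the rhashtable_insert_fast() and rhashtable_lookup_fast()
 * helpers declared in <linux/rhashtable.h>; "my_table" and "my_obj" are
 * placeholder names for the example:
 *
 *      struct rhashtable my_table;
 *      struct test_obj my_obj = { .key = 1 };
 *      struct test_obj *found;
 *      int key = 1;
 *      int err;
 *
 *      err = rhashtable_init(&my_table, &params);
 *      if (err)
 *              return err;
 *
 *      err = rhashtable_insert_fast(&my_table, &my_obj.node, params);
 *
 *      rcu_read_lock();
 *      found = rhashtable_lookup_fast(&my_table, &key, params);
 *      rcu_read_unlock();
 *
 *      rhashtable_destroy(&my_table);
 */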

/**
 * rhashtable_destroy - destroy hash table
 * @ht: the hash table to destroy
 *
 * Frees the bucket array. This function is not RCU safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
        ht->being_destroyed = true;

        cancel_work_sync(&ht->run_work);

        mutex_lock(&ht->mutex);
        bucket_table_free(rht_dereference(ht->tbl, ht));
        mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);