/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
			tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
						   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
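
/* Example of the sizing logic above: with 8 possible CPUs and the default
 * locks_mul of 128, size = roundup_pow_of_two(8 * 128) = 1024, but a table
 * of 512 buckets caps this at 512 >> 1 = 256 locks.  locks_mask is then
 * 255, so bucket "hash" is protected by locks[hash & 255].
 */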

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
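
/* During overlapping resizes the tables form a chain through future_tbl,
 * e.g. ht->tbl -> A -> B with B the newest table; rhashtable_last_table()
 * walks to B so that new work and new insertions are always attached to
 * the most recent table.
 */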

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned int new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht,
				    unsigned int old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned int old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	spin_lock(&ht->lock);
	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;
	spin_unlock(&ht->lock);

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e., to the smallest
 * size that would not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by
 * per-bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned int size;
	int err;

	ASSERT_RHT_MUTEX(ht);

	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}
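
/* Worked example for the shrink target above: with 20 elements,
 * roundup_pow_of_two(20 * 3 / 2) = roundup_pow_of_two(30) = 32 buckets,
 * which keeps the new table under 75% utilisation so it does not
 * immediately grow again.
 */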

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (ht->p.automatic_shrinking && rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}
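
/* Example of the thresholds used above: for a 64-bucket table,
 * rht_grow_above_75() requests an expansion once nelems exceeds 48
 * (75% of 64), and rht_shrink_below_30(), honoured only when
 * automatic_shrinking is set, requests a shrink once nelems drops
 * below 19 (30% of 64).
 */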

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned int hash)
{
	unsigned int elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* Do not schedule more than one rehash */
	else if (old_tbl != tbl)
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL) {
		/* Schedule async resize/rehash to retry the allocation in
		 * non-atomic context.
		 */
		schedule_work(&ht->run_work);
		return -ENOMEM;
	}

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
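
/* rhashtable_insert_rehash() is invoked from the inline insert fast path
 * in <linux/rhashtable.h> when a chain exceeds ht->elasticity or the table
 * crosses 100% utilisation: it attaches a fresh table (allocated with
 * GFP_ATOMIC since we may be in atomic context) and lets the deferred
 * worker perform the actual rehash.
 */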

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned int hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice. Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:
	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	spin_lock(&ht->lock);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	spin_unlock(&ht->lock);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
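
/* A minimal walk sketch (illustrative only; error handling is trimmed and
 * the use of the returned object is left to the caller):
 *
 *	struct rhashtable_iter iter;
 *	void *obj;
 *
 *	if (rhashtable_walk_init(ht, &iter))
 *		return -ENOMEM;
 *
 *	rhashtable_walk_start(&iter);	(-EAGAIN here only means the
 *					 iterator was rewound by a resize)
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(rewound; objects may now
 *						 be seen a second time)
 *			break;
 *		}
 *		... use obj ...
 *	}
 *
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */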

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}
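
/* Example: an nelem_hint of 100 yields roundup_pow_of_two(100 * 4 / 3) =
 * roundup_pow_of_two(133) = 256 buckets, so the hinted element count fills
 * at most 75% of the initial table and does not trigger an immediate
 * expansion.
 */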

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 len, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	spin_lock_init(&ht->lock);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	/* The maximum (not average) chain length grows with the
	 * size of the hash table, at a rate of (log N)/(log log N).
	 * The value of 16 is selected so that even if the hash
	 * table grew to 2^32 you would not expect the maximum
	 * chain length to exceed it unless we are under attack
	 * (or extremely unlucky).
	 *
	 * As this limit is only to detect attacks, we don't need
	 * to set it to a lower value as you'd need the chain
	 * length to vastly exceed 16 to have any real effect
	 * on the system.
	 */
	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
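
/* A minimal usage sketch for the fixed-length-key configuration from
 * Example 1 above (illustrative only; "struct test_obj" and "params" are
 * the hypothetical definitions from that example, and error handling is
 * trimmed).  The inline fast paths come from <linux/rhashtable.h>:
 *
 *	struct rhashtable ht;
 *	struct test_obj *obj;
 *	int key = 42;
 *
 *	rhashtable_init(&ht, &params);
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	obj->key = key;
 *	rhashtable_insert_fast(&ht, &obj->node, params);
 *
 *	obj = rhashtable_lookup_fast(&ht, &key, params);
 *
 * The object returned by a lookup is only guaranteed to stay alive while
 * the caller holds rcu_read_lock() or otherwise prevents its removal.
 */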

/**
 * rhashtable_free_and_destroy - free elements and destroy hash table
 * @ht:		the hash table to destroy
 * @free_fn:	callback to release resources of element
 * @arg:	pointer passed to free_fn
 *
 * Stops an eventual async resize. If defined, invokes free_fn for each
 * element to release resources. Please note that RCU protected
 * readers may still be accessing the elements. Releasing of resources
 * must occur in a compatible manner. Then frees the bucket array.
 *
 * This function will eventually sleep to wait for an async resize
 * to complete. The caller is responsible for ensuring that no further
 * write operations occur in parallel.
 */
void rhashtable_free_and_destroy(struct rhashtable *ht,
				 void (*free_fn)(void *ptr, void *arg),
				 void *arg)
{
	const struct bucket_table *tbl;
	unsigned int i;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);
	if (free_fn) {
		for (i = 0; i < tbl->size; i++) {
			struct rhash_head *pos, *next;

			for (pos = rht_dereference(tbl->buckets[i], ht),
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL;
			     !rht_is_a_nulls(pos);
			     pos = next,
			     next = !rht_is_a_nulls(pos) ?
					rht_dereference(pos->next, ht) : NULL)
				free_fn(rht_obj(ht, pos), arg);
		}
	}

	bucket_table_free(tbl);
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
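
/* Example free_fn (illustrative): the callback receives the element
 * itself, i.e. rht_obj() has already been applied, not the rhash_head:
 *
 *	static void free_obj(void *ptr, void *arg)
 *	{
 *		kfree(ptr);
 *	}
 *
 *	rhashtable_free_and_destroy(&ht, free_obj, NULL);
 */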

void rhashtable_destroy(struct rhashtable *ht)
{
	return rhashtable_free_and_destroy(ht, NULL, NULL);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);