1 /*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26 /*
27 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2011, 2012, Intel Corporation.
31 */
32 /*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 *
36 * libcfs/libcfs/hash.c
37 *
38 * Implement a hash table class for hashed lookups in the Lustre system.
39 *
40 * Author: YuZhangyong <yzy@clusterfs.com>
41 *
42 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
43 * - Simplified API and improved documentation
44 * - Added per-hash feature flags:
45 * * CFS_HASH_DEBUG additional validation
46 * * CFS_HASH_REHASH dynamic rehashing
47 * - Added per-hash statistics
48 * - General performance enhancements
49 *
50 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
51 * - moved everything to libcfs
52 * - don't allow cur_bits != max_bits unless CFS_HASH_REHASH is set
53 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
54 * - buckets are allocated one by one (instead of as contiguous memory),
55 * to avoid unnecessary cacheline conflicts
56 *
57 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
58 * - "bucket" is a group of hlist_head now, user can specify bucket size
59 * by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
60 * one lock for reducing memory overhead.
61 *
62 * - support lockless hash, caller will take care of locks:
63 * avoid lock overhead for hash tables that are already protected
64 * by locking in the caller for another reason
65 *
66 * - support both spin_lock/rwlock for bucket:
67 * the overhead of spinlock contention is lower than the read/write
68 * contention of an rwlock, so using a spinlock to serialize operations
69 * on a bucket is more reasonable for frequently modified hash tables
70 *
71 * - support a single-lock mode:
72 * one lock protects all hash operations, avoiding the overhead of
73 * multiple locks when the hash table is always small
74 *
75 * - removed a lot of unnecessary addref & decref on hash elements:
76 * addref & decref are atomic operations in many use-cases, which
77 * makes them expensive.
78 *
79 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
80 * some Lustre use-cases require these functions to be strictly
81 * non-blocking; in those cases any required rehash is scheduled on
82 * a different thread.
83 *
84 * - safer rehash on large hash tables
85 * In the old implementation, the rehash function would exclusively
86 * lock the hash table and finish the rehash in one batch; that is
87 * dangerous on an SMP system because rehashing millions of elements
88 * can take a long time. The new rehash can release the lock and relax
89 * the CPU in the middle of a rehash, so it's safe for another thread
90 * to search/change the hash table even while it is rehashing.
91 *
92 * - support two different refcount modes
93 * . hash table has refcount on element
94 * . hash table doesn't change refcount on adding/removing element
95 *
96 * - support long name hash table (for param-tree)
97 *
98 * - fix a bug in cfs_hash_rehash_key:
99 * in the old implementation, cfs_hash_rehash_key could corrupt the
100 * hash-table because @key was overwritten without any protection.
101 * Now the user must define hs_keycpy for rehash-enabled hash tables;
102 * cfs_hash_rehash_key overwrites the hash-key inside the lock by
103 * calling hs_keycpy.
104 *
105 * - better hash iteration:
106 * We now support both locked and lockless iteration of the hash table.
107 * Also, the user can break the iteration by returning 1 from the callback.
108 */
109
110 #include "../../include/linux/libcfs/libcfs.h"
111 #include <linux/seq_file.h>
112
113 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
114 static unsigned int warn_on_depth = 8;
115 module_param(warn_on_depth, uint, 0644);
116 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
117 #endif
118
119 struct cfs_wi_sched *cfs_sched_rehash;
120
121 static inline void
122 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
123
124 static inline void
125 cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
126
127 static inline void
128 cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
129 {
130 spin_lock(&lock->spin);
131 }
132
133 static inline void
134 cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
135 {
136 spin_unlock(&lock->spin);
137 }
138
139 static inline void
140 cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
141 {
142 if (!exclusive)
143 read_lock(&lock->rw);
144 else
145 write_lock(&lock->rw);
146 }
147
148 static inline void
149 cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
150 {
151 if (!exclusive)
152 read_unlock(&lock->rw);
153 else
154 write_unlock(&lock->rw);
155 }
156
157 /** No lock hash */
158 static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
159 .hs_lock = cfs_hash_nl_lock,
160 .hs_unlock = cfs_hash_nl_unlock,
161 .hs_bkt_lock = cfs_hash_nl_lock,
162 .hs_bkt_unlock = cfs_hash_nl_unlock,
163 };
164
165 /** no bucket lock, one spinlock to protect everything */
166 static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
167 .hs_lock = cfs_hash_spin_lock,
168 .hs_unlock = cfs_hash_spin_unlock,
169 .hs_bkt_lock = cfs_hash_nl_lock,
170 .hs_bkt_unlock = cfs_hash_nl_unlock,
171 };
172
173 /** spin bucket lock, rehash is enabled */
174 static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
175 .hs_lock = cfs_hash_rw_lock,
176 .hs_unlock = cfs_hash_rw_unlock,
177 .hs_bkt_lock = cfs_hash_spin_lock,
178 .hs_bkt_unlock = cfs_hash_spin_unlock,
179 };
180
181 /** rw bucket lock, rehash is enabled */
182 static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
183 .hs_lock = cfs_hash_rw_lock,
184 .hs_unlock = cfs_hash_rw_unlock,
185 .hs_bkt_lock = cfs_hash_rw_lock,
186 .hs_bkt_unlock = cfs_hash_rw_unlock,
187 };
188
189 /** spin bucket lock, rehash is disabled */
190 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
191 .hs_lock = cfs_hash_nl_lock,
192 .hs_unlock = cfs_hash_nl_unlock,
193 .hs_bkt_lock = cfs_hash_spin_lock,
194 .hs_bkt_unlock = cfs_hash_spin_unlock,
195 };
196
197 /** rw bucket lock, rehash is disabled */
198 static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = {
199 .hs_lock = cfs_hash_nl_lock,
200 .hs_unlock = cfs_hash_nl_unlock,
201 .hs_bkt_lock = cfs_hash_rw_lock,
202 .hs_bkt_unlock = cfs_hash_rw_unlock,
203 };
204
205 static void
206 cfs_hash_lock_setup(struct cfs_hash *hs)
207 {
208 if (cfs_hash_with_no_lock(hs)) {
209 hs->hs_lops = &cfs_hash_nl_lops;
210
211 } else if (cfs_hash_with_no_bktlock(hs)) {
212 hs->hs_lops = &cfs_hash_nbl_lops;
213 spin_lock_init(&hs->hs_lock.spin);
214
215 } else if (cfs_hash_with_rehash(hs)) {
216 rwlock_init(&hs->hs_lock.rw);
217
218 if (cfs_hash_with_rw_bktlock(hs))
219 hs->hs_lops = &cfs_hash_bkt_rw_lops;
220 else if (cfs_hash_with_spin_bktlock(hs))
221 hs->hs_lops = &cfs_hash_bkt_spin_lops;
222 else
223 LBUG();
224 } else {
225 if (cfs_hash_with_rw_bktlock(hs))
226 hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
227 else if (cfs_hash_with_spin_bktlock(hs))
228 hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
229 else
230 LBUG();
231 }
232 }
233
234 /**
235 * Simple hash head without depth tracking
236 * new element is always added to head of hlist
237 */
238 typedef struct {
239 struct hlist_head hh_head; /**< entries list */
240 } cfs_hash_head_t;
241
242 static int
243 cfs_hash_hh_hhead_size(struct cfs_hash *hs)
244 {
245 return sizeof(cfs_hash_head_t);
246 }
247
248 static struct hlist_head *
249 cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
250 {
251 cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];
252
253 return &head[bd->bd_offset].hh_head;
254 }
255
256 static int
257 cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
258 struct hlist_node *hnode)
259 {
260 hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
261 return -1; /* unknown depth */
262 }
263
264 static int
265 cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
266 struct hlist_node *hnode)
267 {
268 hlist_del_init(hnode);
269 return -1; /* unknown depth */
270 }
271
272 /**
273 * Simple hash head with depth tracking
274 * new element is always added to head of hlist
275 */
276 typedef struct {
277 struct hlist_head hd_head; /**< entries list */
278 unsigned int hd_depth; /**< list length */
279 } cfs_hash_head_dep_t;
280
281 static int
282 cfs_hash_hd_hhead_size(struct cfs_hash *hs)
283 {
284 return sizeof(cfs_hash_head_dep_t);
285 }
286
287 static struct hlist_head *
288 cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
289 {
290 cfs_hash_head_dep_t *head;
291
292 head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
293 return &head[bd->bd_offset].hd_head;
294 }
295
296 static int
297 cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
298 struct hlist_node *hnode)
299 {
300 cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
301 cfs_hash_head_dep_t, hd_head);
302 hlist_add_head(hnode, &hh->hd_head);
303 return ++hh->hd_depth;
304 }
305
306 static int
307 cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
308 struct hlist_node *hnode)
309 {
310 cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
311 cfs_hash_head_dep_t, hd_head);
312 hlist_del_init(hnode);
313 return --hh->hd_depth;
314 }
315
316 /**
317 * doubly-linked hash head without depth tracking;
318 * new elements are always added to the tail of the hlist
319 */
320 typedef struct {
321 struct hlist_head dh_head; /**< entries list */
322 struct hlist_node *dh_tail; /**< the last entry */
323 } cfs_hash_dhead_t;
324
325 static int
326 cfs_hash_dh_hhead_size(struct cfs_hash *hs)
327 {
328 return sizeof(cfs_hash_dhead_t);
329 }
330
331 static struct hlist_head *
332 cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
333 {
334 cfs_hash_dhead_t *head;
335
336 head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
337 return &head[bd->bd_offset].dh_head;
338 }
339
340 static int
341 cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
342 struct hlist_node *hnode)
343 {
344 cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
345 cfs_hash_dhead_t, dh_head);
346
347 if (dh->dh_tail != NULL) /* not empty */
348 hlist_add_behind(hnode, dh->dh_tail);
349 else /* empty list */
350 hlist_add_head(hnode, &dh->dh_head);
351 dh->dh_tail = hnode;
352 return -1; /* unknown depth */
353 }
354
355 static int
356 cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
357 struct hlist_node *hnd)
358 {
359 cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
360 cfs_hash_dhead_t, dh_head);
361
362 if (hnd->next == NULL) { /* it's the tail */
363 dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
364 container_of(hnd->pprev, struct hlist_node, next);
365 }
366 hlist_del_init(hnd);
367 return -1; /* unknown depth */
368 }
369
370 /**
371 * doubly-linked hash head with depth tracking;
372 * new elements are always added to the tail of the hlist
373 */
374 typedef struct {
375 struct hlist_head dd_head; /**< entries list */
376 struct hlist_node *dd_tail; /**< the last entry */
377 unsigned int dd_depth; /**< list length */
378 } cfs_hash_dhead_dep_t;
379
380 static int
381 cfs_hash_dd_hhead_size(struct cfs_hash *hs)
382 {
383 return sizeof(cfs_hash_dhead_dep_t);
384 }
385
386 static struct hlist_head *
387 cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
388 {
389 cfs_hash_dhead_dep_t *head;
390
391 head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
392 return &head[bd->bd_offset].dd_head;
393 }
394
395 static int
396 cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
397 struct hlist_node *hnode)
398 {
399 cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
400 cfs_hash_dhead_dep_t, dd_head);
401
402 if (dh->dd_tail != NULL) /* not empty */
403 hlist_add_behind(hnode, dh->dd_tail);
404 else /* empty list */
405 hlist_add_head(hnode, &dh->dd_head);
406 dh->dd_tail = hnode;
407 return ++dh->dd_depth;
408 }
409
410 static int
411 cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
412 struct hlist_node *hnd)
413 {
414 cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
415 cfs_hash_dhead_dep_t, dd_head);
416
417 if (hnd->next == NULL) { /* it's the tail */
418 dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
419 container_of(hnd->pprev, struct hlist_node, next);
420 }
421 hlist_del_init(hnd);
422 return --dh->dd_depth;
423 }
424
425 static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
426 .hop_hhead = cfs_hash_hh_hhead,
427 .hop_hhead_size = cfs_hash_hh_hhead_size,
428 .hop_hnode_add = cfs_hash_hh_hnode_add,
429 .hop_hnode_del = cfs_hash_hh_hnode_del,
430 };
431
432 static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
433 .hop_hhead = cfs_hash_hd_hhead,
434 .hop_hhead_size = cfs_hash_hd_hhead_size,
435 .hop_hnode_add = cfs_hash_hd_hnode_add,
436 .hop_hnode_del = cfs_hash_hd_hnode_del,
437 };
438
439 static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
440 .hop_hhead = cfs_hash_dh_hhead,
441 .hop_hhead_size = cfs_hash_dh_hhead_size,
442 .hop_hnode_add = cfs_hash_dh_hnode_add,
443 .hop_hnode_del = cfs_hash_dh_hnode_del,
444 };
445
446 static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
447 .hop_hhead = cfs_hash_dd_hhead,
448 .hop_hhead_size = cfs_hash_dd_hhead_size,
449 .hop_hnode_add = cfs_hash_dd_hnode_add,
450 .hop_hnode_del = cfs_hash_dd_hnode_del,
451 };
452
453 static void
454 cfs_hash_hlist_setup(struct cfs_hash *hs)
455 {
456 if (cfs_hash_with_add_tail(hs)) {
457 hs->hs_hops = cfs_hash_with_depth(hs) ?
458 &cfs_hash_dd_hops : &cfs_hash_dh_hops;
459 } else {
460 hs->hs_hops = cfs_hash_with_depth(hs) ?
461 &cfs_hash_hd_hops : &cfs_hash_hh_hops;
462 }
463 }
464
465 static void
466 cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
467 unsigned int bits, const void *key, struct cfs_hash_bd *bd)
468 {
469 unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
470
471 LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
472
473 bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
474 bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
475 }
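/*
 * Worked example (added note, not in the original source): with
 * cur_bits = 10 and bkt_bits = 3, cfs_hash_id() above is masked to a
 * 10-bit index (0..1023).  The low (10 - 3) = 7 bits select one of
 * 2^7 = 128 buckets and the high 3 bits give bd_offset, one of the
 * 2^3 = 8 hlist_heads inside that bucket, so the 1024 hash chains
 * share 128 bucket locks.
 */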
476
477 void
478 cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
479 {
480 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
481 if (likely(hs->hs_rehash_buckets == NULL)) {
482 cfs_hash_bd_from_key(hs, hs->hs_buckets,
483 hs->hs_cur_bits, key, bd);
484 } else {
485 LASSERT(hs->hs_rehash_bits != 0);
486 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
487 hs->hs_rehash_bits, key, bd);
488 }
489 }
490 EXPORT_SYMBOL(cfs_hash_bd_get);
491
492 static inline void
493 cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
494 {
495 if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
496 return;
497
498 bd->bd_bucket->hsb_depmax = dep_cur;
499 # if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
500 if (likely(warn_on_depth == 0 ||
501 max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
502 return;
503
504 spin_lock(&hs->hs_dep_lock);
505 hs->hs_dep_max = dep_cur;
506 hs->hs_dep_bkt = bd->bd_bucket->hsb_index;
507 hs->hs_dep_off = bd->bd_offset;
508 hs->hs_dep_bits = hs->hs_cur_bits;
509 spin_unlock(&hs->hs_dep_lock);
510
511 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
512 # endif
513 }
514
515 void
516 cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
517 struct hlist_node *hnode)
518 {
519 int rc;
520
521 rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
522 cfs_hash_bd_dep_record(hs, bd, rc);
523 bd->bd_bucket->hsb_version++;
524 if (unlikely(bd->bd_bucket->hsb_version == 0))
525 bd->bd_bucket->hsb_version++;
526 bd->bd_bucket->hsb_count++;
527
528 if (cfs_hash_with_counter(hs))
529 atomic_inc(&hs->hs_count);
530 if (!cfs_hash_with_no_itemref(hs))
531 cfs_hash_get(hs, hnode);
532 }
533 EXPORT_SYMBOL(cfs_hash_bd_add_locked);
534
535 void
536 cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
537 struct hlist_node *hnode)
538 {
539 hs->hs_hops->hop_hnode_del(hs, bd, hnode);
540
541 LASSERT(bd->bd_bucket->hsb_count > 0);
542 bd->bd_bucket->hsb_count--;
543 bd->bd_bucket->hsb_version++;
544 if (unlikely(bd->bd_bucket->hsb_version == 0))
545 bd->bd_bucket->hsb_version++;
546
547 if (cfs_hash_with_counter(hs)) {
548 LASSERT(atomic_read(&hs->hs_count) > 0);
549 atomic_dec(&hs->hs_count);
550 }
551 if (!cfs_hash_with_no_itemref(hs))
552 cfs_hash_put_locked(hs, hnode);
553 }
554 EXPORT_SYMBOL(cfs_hash_bd_del_locked);
555
556 void
557 cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
558 struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
559 {
560 struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
561 struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
562 int rc;
563
564 if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
565 return;
566
567 /* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
568 * in cfs_hash_bd_del/add_locked */
569 hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
570 rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
571 cfs_hash_bd_dep_record(hs, bd_new, rc);
572
573 LASSERT(obkt->hsb_count > 0);
574 obkt->hsb_count--;
575 obkt->hsb_version++;
576 if (unlikely(obkt->hsb_version == 0))
577 obkt->hsb_version++;
578 nbkt->hsb_count++;
579 nbkt->hsb_version++;
580 if (unlikely(nbkt->hsb_version == 0))
581 nbkt->hsb_version++;
582 }
583 EXPORT_SYMBOL(cfs_hash_bd_move_locked);
584
585 enum {
586 /** always set, for sanity (avoid ZERO intent) */
587 CFS_HS_LOOKUP_MASK_FIND = 1 << 0,
588 /** return entry with a ref */
589 CFS_HS_LOOKUP_MASK_REF = 1 << 1,
590 /** add entry if not existing */
591 CFS_HS_LOOKUP_MASK_ADD = 1 << 2,
592 /** delete entry, ignore other masks */
593 CFS_HS_LOOKUP_MASK_DEL = 1 << 3,
594 };
595
596 typedef enum cfs_hash_lookup_intent {
597 /** return item w/o refcount */
598 CFS_HS_LOOKUP_IT_PEEK = CFS_HS_LOOKUP_MASK_FIND,
599 /** return item with refcount */
600 CFS_HS_LOOKUP_IT_FIND = (CFS_HS_LOOKUP_MASK_FIND |
601 CFS_HS_LOOKUP_MASK_REF),
602 /** return item w/o refcount if existed, otherwise add */
603 CFS_HS_LOOKUP_IT_ADD = (CFS_HS_LOOKUP_MASK_FIND |
604 CFS_HS_LOOKUP_MASK_ADD),
605 /** return item with refcount if existed, otherwise add */
606 CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
607 CFS_HS_LOOKUP_MASK_ADD),
608 /** delete if existed */
609 CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
610 CFS_HS_LOOKUP_MASK_DEL)
611 } cfs_hash_lookup_intent_t;
612
613 static struct hlist_node *
614 cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
615 const void *key, struct hlist_node *hnode,
616 cfs_hash_lookup_intent_t intent)
617
618 {
619 struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
620 struct hlist_node *ehnode;
621 struct hlist_node *match;
622 int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
623
624 /* with this function, we can avoid a lot of useless refcount ops,
625 * which are expensive atomic operations most of the time. */
626 match = intent_add ? NULL : hnode;
627 hlist_for_each(ehnode, hhead) {
628 if (!cfs_hash_keycmp(hs, key, ehnode))
629 continue;
630
631 if (match != NULL && match != ehnode) /* can't match */
632 continue;
633
634 /* match and ... */
635 if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
636 cfs_hash_bd_del_locked(hs, bd, ehnode);
637 return ehnode;
638 }
639
640 /* caller wants refcount? */
641 if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
642 cfs_hash_get(hs, ehnode);
643 return ehnode;
644 }
645 /* no match item */
646 if (!intent_add)
647 return NULL;
648
649 LASSERT(hnode != NULL);
650 cfs_hash_bd_add_locked(hs, bd, hnode);
651 return hnode;
652 }
653
654 struct hlist_node *
655 cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
656 {
657 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
658 CFS_HS_LOOKUP_IT_FIND);
659 }
660 EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
661
662 struct hlist_node *
663 cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
664 {
665 return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
666 CFS_HS_LOOKUP_IT_PEEK);
667 }
668 EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
669
670 struct hlist_node *
671 cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
672 const void *key, struct hlist_node *hnode,
673 int noref)
674 {
675 return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
676 CFS_HS_LOOKUP_IT_ADD |
677 (!noref * CFS_HS_LOOKUP_MASK_REF));
678 }
679 EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);
680
681 struct hlist_node *
682 cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
683 const void *key, struct hlist_node *hnode)
684 {
685 /* hnode can be NULL, we find the first item with @key */
686 return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
687 CFS_HS_LOOKUP_IT_FINDDEL);
688 }
689 EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
690
691 static void
692 cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
693 unsigned n, int excl)
694 {
695 struct cfs_hash_bucket *prev = NULL;
696 int i;
697
698 /**
699 * bds must be ordered by ascending bd->bd_bucket->hsb_index.
700 * NB: it's possible that several bds point to the same bucket but
701 * have different bd::bd_offset, so we must take care to avoid deadlock.
702 */
703 cfs_hash_for_each_bd(bds, n, i) {
704 if (prev == bds[i].bd_bucket)
705 continue;
706
707 LASSERT(prev == NULL ||
708 prev->hsb_index < bds[i].bd_bucket->hsb_index);
709 cfs_hash_bd_lock(hs, &bds[i], excl);
710 prev = bds[i].bd_bucket;
711 }
712 }
713
714 static void
715 cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
716 unsigned n, int excl)
717 {
718 struct cfs_hash_bucket *prev = NULL;
719 int i;
720
721 cfs_hash_for_each_bd(bds, n, i) {
722 if (prev != bds[i].bd_bucket) {
723 cfs_hash_bd_unlock(hs, &bds[i], excl);
724 prev = bds[i].bd_bucket;
725 }
726 }
727 }
728
729 static struct hlist_node *
730 cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
731 unsigned n, const void *key)
732 {
733 struct hlist_node *ehnode;
734 unsigned i;
735
736 cfs_hash_for_each_bd(bds, n, i) {
737 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
738 CFS_HS_LOOKUP_IT_FIND);
739 if (ehnode != NULL)
740 return ehnode;
741 }
742 return NULL;
743 }
744
745 static struct hlist_node *
746 cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
747 struct cfs_hash_bd *bds, unsigned n, const void *key,
748 struct hlist_node *hnode, int noref)
749 {
750 struct hlist_node *ehnode;
751 int intent;
752 unsigned i;
753
754 LASSERT(hnode != NULL);
755 intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);
756
757 cfs_hash_for_each_bd(bds, n, i) {
758 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
759 NULL, intent);
760 if (ehnode != NULL)
761 return ehnode;
762 }
763
764 if (i == 1) { /* only one bucket */
765 cfs_hash_bd_add_locked(hs, &bds[0], hnode);
766 } else {
767 struct cfs_hash_bd mybd;
768
769 cfs_hash_bd_get(hs, key, &mybd);
770 cfs_hash_bd_add_locked(hs, &mybd, hnode);
771 }
772
773 return hnode;
774 }
775
776 static struct hlist_node *
777 cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
778 unsigned n, const void *key,
779 struct hlist_node *hnode)
780 {
781 struct hlist_node *ehnode;
782 unsigned i;
783
784 cfs_hash_for_each_bd(bds, n, i) {
785 ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
786 CFS_HS_LOOKUP_IT_FINDDEL);
787 if (ehnode != NULL)
788 return ehnode;
789 }
790 return NULL;
791 }
792
793 static void
794 cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
795 {
796 int rc;
797
798 if (bd2->bd_bucket == NULL)
799 return;
800
801 if (bd1->bd_bucket == NULL) {
802 *bd1 = *bd2;
803 bd2->bd_bucket = NULL;
804 return;
805 }
806
807 rc = cfs_hash_bd_compare(bd1, bd2);
808 if (rc == 0) {
809 bd2->bd_bucket = NULL;
810
811 } else if (rc > 0) { /* swap bd1 and bd2 */
812 struct cfs_hash_bd tmp;
813
814 tmp = *bd2;
815 *bd2 = *bd1;
816 *bd1 = tmp;
817 }
818 }
819
820 void
821 cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
822 {
823 /* NB: caller should hold hs_lock.rw if REHASH is set */
824 cfs_hash_bd_from_key(hs, hs->hs_buckets,
825 hs->hs_cur_bits, key, &bds[0]);
826 if (likely(hs->hs_rehash_buckets == NULL)) {
827 /* no rehash or not rehashing */
828 bds[1].bd_bucket = NULL;
829 return;
830 }
831
832 LASSERT(hs->hs_rehash_bits != 0);
833 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
834 hs->hs_rehash_bits, key, &bds[1]);
835
836 cfs_hash_bd_order(&bds[0], &bds[1]);
837 }
838 EXPORT_SYMBOL(cfs_hash_dual_bd_get);
839
840 void
841 cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
842 {
843 cfs_hash_multi_bd_lock(hs, bds, 2, excl);
844 }
845 EXPORT_SYMBOL(cfs_hash_dual_bd_lock);
846
847 void
848 cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
849 {
850 cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
851 }
852 EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);
853
854 struct hlist_node *
855 cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
856 const void *key)
857 {
858 return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
859 }
860 EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);
861
862 struct hlist_node *
863 cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
864 const void *key, struct hlist_node *hnode,
865 int noref)
866 {
867 return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
868 hnode, noref);
869 }
870 EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);
871
872 struct hlist_node *
873 cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
874 const void *key, struct hlist_node *hnode)
875 {
876 return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
877 }
878 EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
879
880 static void
881 cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
882 int bkt_size, int prev_size, int size)
883 {
884 int i;
885
886 for (i = prev_size; i < size; i++) {
887 if (buckets[i] != NULL)
888 LIBCFS_FREE(buckets[i], bkt_size);
889 }
890
891 LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
892 }
893
894 /*
895 * Create or grow bucket memory. Return old_buckets if no allocation was
896 * needed, the newly allocated buckets if allocation was needed and
897 * successful, and NULL on error.
898 */
899 static struct cfs_hash_bucket **
900 cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
901 unsigned int old_size, unsigned int new_size)
902 {
903 struct cfs_hash_bucket **new_bkts;
904 int i;
905
906 LASSERT(old_size == 0 || old_bkts != NULL);
907
908 if (old_bkts != NULL && old_size == new_size)
909 return old_bkts;
910
911 LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
912 if (new_bkts == NULL)
913 return NULL;
914
915 if (old_bkts != NULL) {
916 memcpy(new_bkts, old_bkts,
917 min(old_size, new_size) * sizeof(*old_bkts));
918 }
919
920 for (i = old_size; i < new_size; i++) {
921 struct hlist_head *hhead;
922 struct cfs_hash_bd bd;
923
924 LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
925 if (new_bkts[i] == NULL) {
926 cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
927 old_size, new_size);
928 return NULL;
929 }
930
931 new_bkts[i]->hsb_index = i;
932 new_bkts[i]->hsb_version = 1; /* shouldn't be zero */
933 new_bkts[i]->hsb_depmax = -1; /* unknown */
934 bd.bd_bucket = new_bkts[i];
935 cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
936 INIT_HLIST_HEAD(hhead);
937
938 if (cfs_hash_with_no_lock(hs) ||
939 cfs_hash_with_no_bktlock(hs))
940 continue;
941
942 if (cfs_hash_with_rw_bktlock(hs))
943 rwlock_init(&new_bkts[i]->hsb_lock.rw);
944 else if (cfs_hash_with_spin_bktlock(hs))
945 spin_lock_init(&new_bkts[i]->hsb_lock.spin);
946 else
947 LBUG(); /* invalid use-case */
948 }
949 return new_bkts;
950 }
951
952 /**
953 * Initialize new libcfs hash, where:
954 * @name - Descriptive hash name
955 * @cur_bits - Initial hash table size, in bits
956 * @max_bits - Maximum allowed hash table resize, in bits
957 * @ops - Registered hash table operations
958 * @flags - CFS_HASH_REHASH enable dynamic hash resizing
959 * - CFS_HASH_SORT enable chained hash sort
960 */
961 static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
962
963 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
964 static int cfs_hash_dep_print(cfs_workitem_t *wi)
965 {
966 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
967 int dep;
968 int bkt;
969 int off;
970 int bits;
971
972 spin_lock(&hs->hs_dep_lock);
973 dep = hs->hs_dep_max;
974 bkt = hs->hs_dep_bkt;
975 off = hs->hs_dep_off;
976 bits = hs->hs_dep_bits;
977 spin_unlock(&hs->hs_dep_lock);
978
979 LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
980 hs->hs_name, bits, dep, bkt, off);
981 spin_lock(&hs->hs_dep_lock);
982 hs->hs_dep_bits = 0; /* mark as workitem done */
983 spin_unlock(&hs->hs_dep_lock);
984 return 0;
985 }
986
987 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
988 {
989 spin_lock_init(&hs->hs_dep_lock);
990 cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
991 }
992
993 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
994 {
995 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
996 return;
997
998 spin_lock(&hs->hs_dep_lock);
999 while (hs->hs_dep_bits != 0) {
1000 spin_unlock(&hs->hs_dep_lock);
1001 cond_resched();
1002 spin_lock(&hs->hs_dep_lock);
1003 }
1004 spin_unlock(&hs->hs_dep_lock);
1005 }
1006
1007 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
1008
1009 static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
1010 static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
1011
1012 #endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
1013
1014 struct cfs_hash *
1015 cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1016 unsigned bkt_bits, unsigned extra_bytes,
1017 unsigned min_theta, unsigned max_theta,
1018 cfs_hash_ops_t *ops, unsigned flags)
1019 {
1020 struct cfs_hash *hs;
1021 int len;
1022
1023 CLASSERT(CFS_HASH_THETA_BITS < 15);
1024
1025 LASSERT(name != NULL);
1026 LASSERT(ops != NULL);
1027 LASSERT(ops->hs_key);
1028 LASSERT(ops->hs_hash);
1029 LASSERT(ops->hs_object);
1030 LASSERT(ops->hs_keycmp);
1031 LASSERT(ops->hs_get != NULL);
1032 LASSERT(ops->hs_put_locked != NULL);
1033
1034 if ((flags & CFS_HASH_REHASH) != 0)
1035 flags |= CFS_HASH_COUNTER; /* must have counter */
1036
1037 LASSERT(cur_bits > 0);
1038 LASSERT(cur_bits >= bkt_bits);
1039 LASSERT(max_bits >= cur_bits && max_bits < 31);
1040 LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1041 LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1042 (flags & CFS_HASH_NO_LOCK) == 0));
1043 LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
1044 ops->hs_keycpy != NULL));
1045
1046 len = (flags & CFS_HASH_BIGNAME) == 0 ?
1047 CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1048 LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1049 if (hs == NULL)
1050 return NULL;
1051
1052 strncpy(hs->hs_name, name, len);
1053 hs->hs_name[len - 1] = '\0';
1054 hs->hs_flags = flags;
1055
1056 atomic_set(&hs->hs_refcount, 1);
1057 atomic_set(&hs->hs_count, 0);
1058
1059 cfs_hash_lock_setup(hs);
1060 cfs_hash_hlist_setup(hs);
1061
1062 hs->hs_cur_bits = (__u8)cur_bits;
1063 hs->hs_min_bits = (__u8)cur_bits;
1064 hs->hs_max_bits = (__u8)max_bits;
1065 hs->hs_bkt_bits = (__u8)bkt_bits;
1066
1067 hs->hs_ops = ops;
1068 hs->hs_extra_bytes = extra_bytes;
1069 hs->hs_rehash_bits = 0;
1070 cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1071 cfs_hash_depth_wi_init(hs);
1072
1073 if (cfs_hash_with_rehash(hs))
1074 __cfs_hash_set_theta(hs, min_theta, max_theta);
1075
1076 hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1077 CFS_HASH_NBKT(hs));
1078 if (hs->hs_buckets != NULL)
1079 return hs;
1080
1081 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1082 return NULL;
1083 }
1084 EXPORT_SYMBOL(cfs_hash_create);
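/*
 * Usage sketch (illustrative only, not part of the original file): the
 * callback names and the object type below are made-up placeholders,
 * and the theta constants are assumed to come from the libcfs hash
 * header; only cfs_hash_create() and the flags shown are taken from
 * this file.
 *
 *	static cfs_hash_ops_t my_hash_ops = {
 *		.hs_hash	= my_obj_hash,		// key -> hash index
 *		.hs_key		= my_obj_key,		// hlist_node -> key
 *		.hs_keycmp	= my_obj_keycmp,	// compare key with item
 *		.hs_object	= my_obj_object,	// hlist_node -> object
 *		.hs_get		= my_obj_get,		// take a reference
 *		.hs_put_locked	= my_obj_put_locked,	// drop ref under lock
 *	};
 *
 *	hs = cfs_hash_create("my_hash",
 *			     7,		// cur_bits: 2^7 hash heads to start
 *			     12,	// max_bits: may grow up to 2^12
 *			     3,		// bkt_bits: 2^3 heads per bucket/lock
 *			     0,		// extra_bytes
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &my_hash_ops,
 *			     CFS_HASH_REHASH | CFS_HASH_COUNTER);
 *	if (hs == NULL)
 *		return -ENOMEM;
 */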
1085
1086 /**
1087 * Cleanup libcfs hash @hs.
1088 */
1089 static void
1090 cfs_hash_destroy(struct cfs_hash *hs)
1091 {
1092 struct hlist_node *hnode;
1093 struct hlist_node *pos;
1094 struct cfs_hash_bd bd;
1095 int i;
1096
1097 LASSERT(hs != NULL);
1098 LASSERT(!cfs_hash_is_exiting(hs) &&
1099 !cfs_hash_is_iterating(hs));
1100
1101 /**
1102 * prohibit further rehashes; no lock is needed because we are
1103 * the only (last) thread that can change it.
1104 */
1105 hs->hs_exiting = 1;
1106 if (cfs_hash_with_rehash(hs))
1107 cfs_hash_rehash_cancel(hs);
1108
1109 cfs_hash_depth_wi_cancel(hs);
1110 /* rehash should be done/canceled */
1111 LASSERT(hs->hs_buckets != NULL &&
1112 hs->hs_rehash_buckets == NULL);
1113
1114 cfs_hash_for_each_bucket(hs, &bd, i) {
1115 struct hlist_head *hhead;
1116
1117 LASSERT(bd.bd_bucket != NULL);
1118 /* this lock is not strictly needed, taken just for code consistency */
1119 cfs_hash_bd_lock(hs, &bd, 1);
1120
1121 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1122 hlist_for_each_safe(hnode, pos, hhead) {
1123 LASSERTF(!cfs_hash_with_assert_empty(hs),
1124 "hash %s bucket %u(%u) is not "
1125 " empty: %u items left\n",
1126 hs->hs_name, bd.bd_bucket->hsb_index,
1127 bd.bd_offset, bd.bd_bucket->hsb_count);
1128 /* can't assert key validity here, because
1129 * the rehash may have been interrupted */
1130 cfs_hash_bd_del_locked(hs, &bd, hnode);
1131 cfs_hash_exit(hs, hnode);
1132 }
1133 }
1134 LASSERT(bd.bd_bucket->hsb_count == 0);
1135 cfs_hash_bd_unlock(hs, &bd, 1);
1136 cond_resched();
1137 }
1138
1139 LASSERT(atomic_read(&hs->hs_count) == 0);
1140
1141 cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1142 0, CFS_HASH_NBKT(hs));
1143 i = cfs_hash_with_bigname(hs) ?
1144 CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1145 LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1146 }
1147
1148 struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1149 {
1150 if (atomic_inc_not_zero(&hs->hs_refcount))
1151 return hs;
1152 return NULL;
1153 }
1154 EXPORT_SYMBOL(cfs_hash_getref);
1155
1156 void cfs_hash_putref(struct cfs_hash *hs)
1157 {
1158 if (atomic_dec_and_test(&hs->hs_refcount))
1159 cfs_hash_destroy(hs);
1160 }
1161 EXPORT_SYMBOL(cfs_hash_putref);
1162
1163 static inline int
1164 cfs_hash_rehash_bits(struct cfs_hash *hs)
1165 {
1166 if (cfs_hash_with_no_lock(hs) ||
1167 !cfs_hash_with_rehash(hs))
1168 return -EOPNOTSUPP;
1169
1170 if (unlikely(cfs_hash_is_exiting(hs)))
1171 return -ESRCH;
1172
1173 if (unlikely(cfs_hash_is_rehashing(hs)))
1174 return -EALREADY;
1175
1176 if (unlikely(cfs_hash_is_iterating(hs)))
1177 return -EAGAIN;
1178
1179 /* XXX: need to handle case with max_theta != 2.0
1180 * and the case with min_theta != 0.5 */
1181 if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1182 (__cfs_hash_theta(hs) > hs->hs_max_theta))
1183 return hs->hs_cur_bits + 1;
1184
1185 if (!cfs_hash_with_shrink(hs))
1186 return 0;
1187
1188 if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1189 (__cfs_hash_theta(hs) < hs->hs_min_theta))
1190 return hs->hs_cur_bits - 1;
1191
1192 return 0;
1193 }
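/*
 * Note (added, not in the original source): theta is effectively the
 * table load factor, i.e. the element count relative to the number of
 * hash heads, kept in fixed point (CFS_HASH_THETA_BITS). With the
 * default thresholds hinted at in the XXX comment above (min ~0.5,
 * max ~2.0), a table with 1024 hash heads would be grown once it holds
 * more than roughly 2048 items and shrunk once it drops below ~512.
 */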
1194
1195 /**
1196 * don't allow inline rehash if:
1197 * - user wants non-blocking change (add/del) on hash table
1198 * - too many elements
1199 */
1200 static inline int
1201 cfs_hash_rehash_inline(struct cfs_hash *hs)
1202 {
1203 return !cfs_hash_with_nblk_change(hs) &&
1204 atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1205 }
1206
1207 /**
1208 * Add item @hnode to libcfs hash @hs using @key. The registered
1209 * ops->hs_get function will be called when the item is added.
1210 */
1211 void
1212 cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1213 {
1214 struct cfs_hash_bd bd;
1215 int bits;
1216
1217 LASSERT(hlist_unhashed(hnode));
1218
1219 cfs_hash_lock(hs, 0);
1220 cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1221
1222 cfs_hash_key_validate(hs, key, hnode);
1223 cfs_hash_bd_add_locked(hs, &bd, hnode);
1224
1225 cfs_hash_bd_unlock(hs, &bd, 1);
1226
1227 bits = cfs_hash_rehash_bits(hs);
1228 cfs_hash_unlock(hs, 0);
1229 if (bits > 0)
1230 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1231 }
1232 EXPORT_SYMBOL(cfs_hash_add);
1233
1234 static struct hlist_node *
1235 cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1236 struct hlist_node *hnode, int noref)
1237 {
1238 struct hlist_node *ehnode;
1239 struct cfs_hash_bd bds[2];
1240 int bits = 0;
1241
1242 LASSERT(hlist_unhashed(hnode));
1243
1244 cfs_hash_lock(hs, 0);
1245 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1246
1247 cfs_hash_key_validate(hs, key, hnode);
1248 ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1249 hnode, noref);
1250 cfs_hash_dual_bd_unlock(hs, bds, 1);
1251
1252 if (ehnode == hnode) /* new item added */
1253 bits = cfs_hash_rehash_bits(hs);
1254 cfs_hash_unlock(hs, 0);
1255 if (bits > 0)
1256 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1257
1258 return ehnode;
1259 }
1260
1261 /**
1262 * Add item @hnode to libcfs hash @hs using @key. The registered
1263 * ops->hs_get function will be called if the item was added.
1264 * Returns 0 on success or -EALREADY on key collisions.
1265 */
1266 int
1267 cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1268 {
1269 return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1270 -EALREADY : 0;
1271 }
1272 EXPORT_SYMBOL(cfs_hash_add_unique);
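/*
 * Illustrative use (added sketch; the object layout is hypothetical):
 *
 *	if (cfs_hash_add_unique(hs, &obj->key, &obj->hnode) == -EALREADY) {
 *		// an item with the same key was already hashed; the new
 *		// object was not added and holds no reference from the hash
 *	}
 */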
1273
1274 /**
1275 * Add item @hnode to libcfs hash @hs using @key. If this @key
1276 * already exists in the hash then ops->hs_get will be called on the
1277 * conflicting entry and that entry will be returned to the caller.
1278 * Otherwise ops->hs_get is called on the item which was added.
1279 */
1280 void *
1281 cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1282 struct hlist_node *hnode)
1283 {
1284 hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1285
1286 return cfs_hash_object(hs, hnode);
1287 }
1288 EXPORT_SYMBOL(cfs_hash_findadd_unique);
1289
1290 /**
1291 * Delete item @hnode from the libcfs hash @hs using @key. The @key
1292 * is required to ensure the correct hash bucket is locked since there
1293 * is no direct linkage from the item to the bucket. The object
1294 * removed from the hash will be returned and ops->hs_put is called
1295 * on the removed object.
1296 */
1297 void *
1298 cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1299 {
1300 void *obj = NULL;
1301 int bits = 0;
1302 struct cfs_hash_bd bds[2];
1303
1304 cfs_hash_lock(hs, 0);
1305 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1306
1307 /* NB: do nothing if @hnode is not in hash table */
1308 if (hnode == NULL || !hlist_unhashed(hnode)) {
1309 if (bds[1].bd_bucket == NULL && hnode != NULL) {
1310 cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1311 } else {
1312 hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1313 key, hnode);
1314 }
1315 }
1316
1317 if (hnode != NULL) {
1318 obj = cfs_hash_object(hs, hnode);
1319 bits = cfs_hash_rehash_bits(hs);
1320 }
1321
1322 cfs_hash_dual_bd_unlock(hs, bds, 1);
1323 cfs_hash_unlock(hs, 0);
1324 if (bits > 0)
1325 cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1326
1327 return obj;
1328 }
1329 EXPORT_SYMBOL(cfs_hash_del);
1330
1331 /**
1332 * Delete item given @key in libcfs hash @hs. The first @key found in
1333 * the hash will be removed; if the key exists multiple times in the hash
1334 * @hs, this function must be called once per key. The removed object
1335 * will be returned and ops->hs_put is called on the removed object.
1336 */
1337 void *
1338 cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1339 {
1340 return cfs_hash_del(hs, key, NULL);
1341 }
1342 EXPORT_SYMBOL(cfs_hash_del_key);
1343
1344 /**
1345 * Lookup an item using @key in the libcfs hash @hs and return it.
1346 * If the @key is found in the hash, hs->hs_get() is called and the
1347 * matching object is returned. It is the caller's responsibility
1348 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
1349 * when finished with the object. If the @key was not found
1350 * in the hash @hs NULL is returned.
1351 */
1352 void *
1353 cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1354 {
1355 void *obj = NULL;
1356 struct hlist_node *hnode;
1357 struct cfs_hash_bd bds[2];
1358
1359 cfs_hash_lock(hs, 0);
1360 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1361
1362 hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1363 if (hnode != NULL)
1364 obj = cfs_hash_object(hs, hnode);
1365
1366 cfs_hash_dual_bd_unlock(hs, bds, 0);
1367 cfs_hash_unlock(hs, 0);
1368
1369 return obj;
1370 }
1371 EXPORT_SYMBOL(cfs_hash_lookup);
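/*
 * Illustrative lookup/put pairing (added sketch; the object type and
 * key are hypothetical).  The reference taken via ops->hs_get in
 * cfs_hash_lookup() is dropped with the cfs_hash_put() macro:
 *
 *	obj = cfs_hash_lookup(hs, &key);
 *	if (obj != NULL) {
 *		// ... use obj ...
 *		cfs_hash_put(hs, &obj->hnode);
 *	}
 */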
1372
1373 static void
1374 cfs_hash_for_each_enter(struct cfs_hash *hs)
1375 {
1376 LASSERT(!cfs_hash_is_exiting(hs));
1377
1378 if (!cfs_hash_with_rehash(hs))
1379 return;
1380 /*
1381 * NB: there is a race on cfs_hash::hs_iterating, but it doesn't matter
1382 * because it's just an unreliable signal to the rehash thread; the
1383 * rehash thread will try to finish the rehash ASAP when it sees this.
1384 */
1385 hs->hs_iterating = 1;
1386
1387 cfs_hash_lock(hs, 1);
1388 hs->hs_iterators++;
1389
1390 /* NB: iteration is mostly called by a service thread, so we
1391 * tend to cancel a pending rehash request instead of blocking
1392 * the service thread; we will relaunch the rehash request
1393 * after the iteration */
1394 if (cfs_hash_is_rehashing(hs))
1395 cfs_hash_rehash_cancel_locked(hs);
1396 cfs_hash_unlock(hs, 1);
1397 }
1398
1399 static void
1400 cfs_hash_for_each_exit(struct cfs_hash *hs)
1401 {
1402 int remained;
1403 int bits;
1404
1405 if (!cfs_hash_with_rehash(hs))
1406 return;
1407 cfs_hash_lock(hs, 1);
1408 remained = --hs->hs_iterators;
1409 bits = cfs_hash_rehash_bits(hs);
1410 cfs_hash_unlock(hs, 1);
1411 /* NB: there is a race on cfs_hash::hs_iterating, see above */
1412 if (remained == 0)
1413 hs->hs_iterating = 0;
1414 if (bits > 0) {
1415 cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1416 CFS_HASH_LOOP_HOG);
1417 }
1418 }
1419
1420 /**
1421 * For each item in the libcfs hash @hs call the passed callback @func
1422 * and pass to it as an argument each hash item and the private @data.
1423 *
1424 * a) the function may sleep!
1425 * b) during the callback:
1426 * . the bucket lock is held so the callback must never sleep.
1427 * . if @remove_safe is true, the user can remove the current item
1428 * by calling cfs_hash_bd_del_locked
1429 */
1430 static __u64
1431 cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1432 void *data, int remove_safe)
1433 {
1434 struct hlist_node *hnode;
1435 struct hlist_node *pos;
1436 struct cfs_hash_bd bd;
1437 __u64 count = 0;
1438 int excl = !!remove_safe;
1439 int loop = 0;
1440 int i;
1441
1442 cfs_hash_for_each_enter(hs);
1443
1444 cfs_hash_lock(hs, 0);
1445 LASSERT(!cfs_hash_is_rehashing(hs));
1446
1447 cfs_hash_for_each_bucket(hs, &bd, i) {
1448 struct hlist_head *hhead;
1449
1450 cfs_hash_bd_lock(hs, &bd, excl);
1451 if (func == NULL) { /* only glimpse size */
1452 count += bd.bd_bucket->hsb_count;
1453 cfs_hash_bd_unlock(hs, &bd, excl);
1454 continue;
1455 }
1456
1457 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1458 hlist_for_each_safe(hnode, pos, hhead) {
1459 cfs_hash_bucket_validate(hs, &bd, hnode);
1460 count++;
1461 loop++;
1462 if (func(hs, &bd, hnode, data)) {
1463 cfs_hash_bd_unlock(hs, &bd, excl);
1464 goto out;
1465 }
1466 }
1467 }
1468 cfs_hash_bd_unlock(hs, &bd, excl);
1469 if (loop < CFS_HASH_LOOP_HOG)
1470 continue;
1471 loop = 0;
1472 cfs_hash_unlock(hs, 0);
1473 cond_resched();
1474 cfs_hash_lock(hs, 0);
1475 }
1476 out:
1477 cfs_hash_unlock(hs, 0);
1478
1479 cfs_hash_for_each_exit(hs);
1480 return count;
1481 }
1482
1483 typedef struct {
1484 cfs_hash_cond_opt_cb_t func;
1485 void *arg;
1486 } cfs_hash_cond_arg_t;
1487
1488 static int
1489 cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1490 struct hlist_node *hnode, void *data)
1491 {
1492 cfs_hash_cond_arg_t *cond = data;
1493
1494 if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1495 cfs_hash_bd_del_locked(hs, bd, hnode);
1496 return 0;
1497 }
1498
1499 /**
1500 * Delete items from the libcfs hash @hs for which @func returns true.
1501 * The write lock is held during the loop over each bucket to prevent
1502 * any object from gaining a new reference.
1503 */
1504 void
1505 cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1506 {
1507 cfs_hash_cond_arg_t arg = {
1508 .func = func,
1509 .arg = data,
1510 };
1511
1512 cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1513 }
1514 EXPORT_SYMBOL(cfs_hash_cond_del);
1515
1516 void
1517 cfs_hash_for_each(struct cfs_hash *hs,
1518 cfs_hash_for_each_cb_t func, void *data)
1519 {
1520 cfs_hash_for_each_tight(hs, func, data, 0);
1521 }
1522 EXPORT_SYMBOL(cfs_hash_for_each);
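/*
 * Illustrative callback (added sketch; my_obj_is_busy() and the counter
 * are hypothetical).  The callback signature matches the ones used in
 * this file (e.g. cfs_hash_peek); returning nonzero stops the walk:
 *
 *	static int count_busy(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			      struct hlist_node *hnode, void *data)
 *	{
 *		int *busy = data;
 *
 *		if (my_obj_is_busy(cfs_hash_object(hs, hnode)))
 *			(*busy)++;
 *		return 0;	// return 1 here to break the iteration
 *	}
 *
 *	int nbusy = 0;
 *	cfs_hash_for_each(hs, count_busy, &nbusy);
 */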
1523
1524 void
1525 cfs_hash_for_each_safe(struct cfs_hash *hs,
1526 cfs_hash_for_each_cb_t func, void *data)
1527 {
1528 cfs_hash_for_each_tight(hs, func, data, 1);
1529 }
1530 EXPORT_SYMBOL(cfs_hash_for_each_safe);
1531
1532 static int
1533 cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1534 struct hlist_node *hnode, void *data)
1535 {
1536 *(int *)data = 0;
1537 return 1; /* return 1 to break the loop */
1538 }
1539
1540 int
1541 cfs_hash_is_empty(struct cfs_hash *hs)
1542 {
1543 int empty = 1;
1544
1545 cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1546 return empty;
1547 }
1548 EXPORT_SYMBOL(cfs_hash_is_empty);
1549
1550 __u64
1551 cfs_hash_size_get(struct cfs_hash *hs)
1552 {
1553 return cfs_hash_with_counter(hs) ?
1554 atomic_read(&hs->hs_count) :
1555 cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1556 }
1557 EXPORT_SYMBOL(cfs_hash_size_get);
1558
1559 /*
1560 * cfs_hash_for_each_relax:
1561 * Iterate the hash table and call @func on each item without
1562 * holding any lock. This function cannot guarantee that the
1563 * iteration will finish if these features are enabled:
1564 *
1565 * a. if rehash_key is enabled, an item can be moved from
1566 * one bucket to another bucket
1567 * b. the user can remove a non-zero-ref item from the hash-table,
1568 * so the item can disappear from the hash-table; even worse,
1569 * it's possible that the user changed the key and inserted the
1570 * item into another hash bucket.
1571 * There is no way for us to finish the iteration correctly in the
1572 * previous two cases, so the iteration has to be stopped on change.
1573 */
1574 static int
1575 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data)
1576 {
1577 struct hlist_node *hnode;
1578 struct hlist_node *tmp;
1579 struct cfs_hash_bd bd;
1580 __u32 version;
1581 int count = 0;
1582 int stop_on_change;
1583 int rc;
1584 int i;
1585
1586 stop_on_change = cfs_hash_with_rehash_key(hs) ||
1587 !cfs_hash_with_no_itemref(hs) ||
1588 CFS_HOP(hs, put_locked) == NULL;
1589 cfs_hash_lock(hs, 0);
1590 LASSERT(!cfs_hash_is_rehashing(hs));
1591
1592 cfs_hash_for_each_bucket(hs, &bd, i) {
1593 struct hlist_head *hhead;
1594
1595 cfs_hash_bd_lock(hs, &bd, 0);
1596 version = cfs_hash_bd_version_get(&bd);
1597
1598 cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1599 for (hnode = hhead->first; hnode != NULL;) {
1600 cfs_hash_bucket_validate(hs, &bd, hnode);
1601 cfs_hash_get(hs, hnode);
1602 cfs_hash_bd_unlock(hs, &bd, 0);
1603 cfs_hash_unlock(hs, 0);
1604
1605 rc = func(hs, &bd, hnode, data);
1606 if (stop_on_change)
1607 cfs_hash_put(hs, hnode);
1608 cond_resched();
1609 count++;
1610
1611 cfs_hash_lock(hs, 0);
1612 cfs_hash_bd_lock(hs, &bd, 0);
1613 if (!stop_on_change) {
1614 tmp = hnode->next;
1615 cfs_hash_put_locked(hs, hnode);
1616 hnode = tmp;
1617 } else { /* bucket changed? */
1618 if (version !=
1619 cfs_hash_bd_version_get(&bd))
1620 break;
1621 /* safe to continue because no change */
1622 hnode = hnode->next;
1623 }
1624 if (rc) /* callback wants to break iteration */
1625 break;
1626 }
1627 }
1628 cfs_hash_bd_unlock(hs, &bd, 0);
1629 }
1630 cfs_hash_unlock(hs, 0);
1631
1632 return count;
1633 }
1634
1635 int
1636 cfs_hash_for_each_nolock(struct cfs_hash *hs,
1637 cfs_hash_for_each_cb_t func, void *data)
1638 {
1639 if (cfs_hash_with_no_lock(hs) ||
1640 cfs_hash_with_rehash_key(hs) ||
1641 !cfs_hash_with_no_itemref(hs))
1642 return -EOPNOTSUPP;
1643
1644 if (CFS_HOP(hs, get) == NULL ||
1645 (CFS_HOP(hs, put) == NULL &&
1646 CFS_HOP(hs, put_locked) == NULL))
1647 return -EOPNOTSUPP;
1648
1649 cfs_hash_for_each_enter(hs);
1650 cfs_hash_for_each_relax(hs, func, data);
1651 cfs_hash_for_each_exit(hs);
1652
1653 return 0;
1654 }
1655 EXPORT_SYMBOL(cfs_hash_for_each_nolock);
1656
1657 /**
1658 * For each hash bucket in the libcfs hash @hs call the passed callback
1659 * @func until all the hash buckets are empty. The passed callback @func
1660 * or the previously registered callback hs->hs_put must remove the item
1661 * from the hash. You may either use the cfs_hash_del() or hlist_del()
1662 * functions. No rwlocks will be held during the callback @func, so it
1663 * is safe to sleep if needed. This function will not terminate until
1664 * the hash is empty. Note it is still possible to concurrently add new
1665 * items into the hash. It is the caller's responsibility to ensure
1666 * the required locking is in place to prevent concurrent insertions.
1667 */
1668 int
1669 cfs_hash_for_each_empty(struct cfs_hash *hs,
1670 cfs_hash_for_each_cb_t func, void *data)
1671 {
1672 unsigned i = 0;
1673
1674 if (cfs_hash_with_no_lock(hs))
1675 return -EOPNOTSUPP;
1676
1677 if (CFS_HOP(hs, get) == NULL ||
1678 (CFS_HOP(hs, put) == NULL &&
1679 CFS_HOP(hs, put_locked) == NULL))
1680 return -EOPNOTSUPP;
1681
1682 cfs_hash_for_each_enter(hs);
1683 while (cfs_hash_for_each_relax(hs, func, data)) {
1684 CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1685 hs->hs_name, i++);
1686 }
1687 cfs_hash_for_each_exit(hs);
1688 return 0;
1689 }
1690 EXPORT_SYMBOL(cfs_hash_for_each_empty);
1691
1692 void
1693 cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
1694 cfs_hash_for_each_cb_t func, void *data)
1695 {
1696 struct hlist_head *hhead;
1697 struct hlist_node *hnode;
1698 struct cfs_hash_bd bd;
1699
1700 cfs_hash_for_each_enter(hs);
1701 cfs_hash_lock(hs, 0);
1702 if (hindex >= CFS_HASH_NHLIST(hs))
1703 goto out;
1704
1705 cfs_hash_bd_index_set(hs, hindex, &bd);
1706
1707 cfs_hash_bd_lock(hs, &bd, 0);
1708 hhead = cfs_hash_bd_hhead(hs, &bd);
1709 hlist_for_each(hnode, hhead) {
1710 if (func(hs, &bd, hnode, data))
1711 break;
1712 }
1713 cfs_hash_bd_unlock(hs, &bd, 0);
1714 out:
1715 cfs_hash_unlock(hs, 0);
1716 cfs_hash_for_each_exit(hs);
1717 }
1718
1719 EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1720
1721 /*
1722 * For each item in the libcfs hash @hs which matches the @key call
1723 * the passed callback @func and pass to it as an argument each hash
1724 * item and the private @data. During the callback the bucket lock
1725 * is held so the callback must never sleep.
1726 */
1727 void
1728 cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1729 cfs_hash_for_each_cb_t func, void *data)
1730 {
1731 struct hlist_node *hnode;
1732 struct cfs_hash_bd bds[2];
1733 unsigned i;
1734
1735 cfs_hash_lock(hs, 0);
1736
1737 cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1738
1739 cfs_hash_for_each_bd(bds, 2, i) {
1740 struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1741
1742 hlist_for_each(hnode, hlist) {
1743 cfs_hash_bucket_validate(hs, &bds[i], hnode);
1744
1745 if (cfs_hash_keycmp(hs, key, hnode)) {
1746 if (func(hs, &bds[i], hnode, data))
1747 break;
1748 }
1749 }
1750 }
1751
1752 cfs_hash_dual_bd_unlock(hs, bds, 0);
1753 cfs_hash_unlock(hs, 0);
1754 }
1755 EXPORT_SYMBOL(cfs_hash_for_each_key);
1756
1757 /**
1758 * Rehash the libcfs hash @hs to the given @bits. This can be used
1759 * to grow the hash size when excessive chaining is detected, or to
1760 * shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
1761 * flag is set in @hs the libcfs hash may be dynamically rehashed
1762 * during addition or removal if the hash's theta value falls outside
1763 * the range [hs->hs_min_theta, hs->hs_max_theta]. By default
1764 * these values are tuned to keep the chained hash depth small, and
1765 * this approach assumes a reasonably uniform hashing function. The
1766 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1767 */
1768 void
1769 cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1770 {
1771 int i;
1772
1773 /* need hold cfs_hash_lock(hs, 1) */
1774 LASSERT(cfs_hash_with_rehash(hs) &&
1775 !cfs_hash_with_no_lock(hs));
1776
1777 if (!cfs_hash_is_rehashing(hs))
1778 return;
1779
1780 if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1781 hs->hs_rehash_bits = 0;
1782 return;
1783 }
1784
1785 for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1786 cfs_hash_unlock(hs, 1);
1787 /* raise a console warning if we have been waiting too long */
1788 CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1789 "hash %s is still rehashing, rescheduled %d times\n",
1790 hs->hs_name, i - 1);
1791 cond_resched();
1792 cfs_hash_lock(hs, 1);
1793 }
1794 }
1795 EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1796
1797 void
1798 cfs_hash_rehash_cancel(struct cfs_hash *hs)
1799 {
1800 cfs_hash_lock(hs, 1);
1801 cfs_hash_rehash_cancel_locked(hs);
1802 cfs_hash_unlock(hs, 1);
1803 }
1804 EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1805
1806 int
1807 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1808 {
1809 int rc;
1810
1811 LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1812
1813 cfs_hash_lock(hs, 1);
1814
1815 rc = cfs_hash_rehash_bits(hs);
1816 if (rc <= 0) {
1817 cfs_hash_unlock(hs, 1);
1818 return rc;
1819 }
1820
1821 hs->hs_rehash_bits = rc;
1822 if (!do_rehash) {
1823 /* launch and return */
1824 cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1825 cfs_hash_unlock(hs, 1);
1826 return 0;
1827 }
1828
1829 /* rehash right now */
1830 cfs_hash_unlock(hs, 1);
1831
1832 return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1833 }
1834 EXPORT_SYMBOL(cfs_hash_rehash);
1835
1836 static int
1837 cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1838 {
1839 struct cfs_hash_bd new;
1840 struct hlist_head *hhead;
1841 struct hlist_node *hnode;
1842 struct hlist_node *pos;
1843 void *key;
1844 int c = 0;
1845
1846 /* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
1847 cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1848 hlist_for_each_safe(hnode, pos, hhead) {
1849 key = cfs_hash_key(hs, hnode);
1850 LASSERT(key != NULL);
1851 /* Validate hnode is in the correct bucket. */
1852 cfs_hash_bucket_validate(hs, old, hnode);
1853 /*
1854 * Delete from old hash bucket; move to new bucket.
1855 * ops->hs_key must be defined.
1856 */
1857 cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1858 hs->hs_rehash_bits, key, &new);
1859 cfs_hash_bd_move_locked(hs, old, &new, hnode);
1860 c++;
1861 }
1862 }
1863
1864 return c;
1865 }
1866
1867 static int
1868 cfs_hash_rehash_worker(cfs_workitem_t *wi)
1869 {
1870 struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
1871 struct cfs_hash_bucket **bkts;
1872 struct cfs_hash_bd bd;
1873 unsigned int old_size;
1874 unsigned int new_size;
1875 int bsize;
1876 int count = 0;
1877 int rc = 0;
1878 int i;
1879
1880 LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1881
1882 cfs_hash_lock(hs, 0);
1883 LASSERT(cfs_hash_is_rehashing(hs));
1884
1885 old_size = CFS_HASH_NBKT(hs);
1886 new_size = CFS_HASH_RH_NBKT(hs);
1887
1888 cfs_hash_unlock(hs, 0);
1889
1890 /*
1891 * don't need hs::hs_rwlock for hs::hs_buckets,
1892 * because nobody can change bkt-table except me.
1893 */
1894 bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1895 old_size, new_size);
1896 cfs_hash_lock(hs, 1);
1897 if (bkts == NULL) {
1898 rc = -ENOMEM;
1899 goto out;
1900 }
1901
1902 if (bkts == hs->hs_buckets) {
1903 bkts = NULL; /* do nothing */
1904 goto out;
1905 }
1906
1907 rc = __cfs_hash_theta(hs);
1908 if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1909 /* free the new allocated bkt-table */
1910 old_size = new_size;
1911 new_size = CFS_HASH_NBKT(hs);
1912 rc = -EALREADY;
1913 goto out;
1914 }
1915
1916 LASSERT(hs->hs_rehash_buckets == NULL);
1917 hs->hs_rehash_buckets = bkts;
1918
1919 rc = 0;
1920 cfs_hash_for_each_bucket(hs, &bd, i) {
1921 if (cfs_hash_is_exiting(hs)) {
1922 rc = -ESRCH;
1923 /* someone wants to destroy the hash, abort now */
1924 if (old_size < new_size) /* OK to free old bkt-table */
1925 break;
1926 /* it's shrinking, need free new bkt-table */
1927 hs->hs_rehash_buckets = NULL;
1928 old_size = new_size;
1929 new_size = CFS_HASH_NBKT(hs);
1930 goto out;
1931 }
1932
1933 count += cfs_hash_rehash_bd(hs, &bd);
1934 if (count < CFS_HASH_LOOP_HOG ||
1935 cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1936 continue;
1937 }
1938
1939 count = 0;
1940 cfs_hash_unlock(hs, 1);
1941 cond_resched();
1942 cfs_hash_lock(hs, 1);
1943 }
1944
1945 hs->hs_rehash_count++;
1946
1947 bkts = hs->hs_buckets;
1948 hs->hs_buckets = hs->hs_rehash_buckets;
1949 hs->hs_rehash_buckets = NULL;
1950
1951 hs->hs_cur_bits = hs->hs_rehash_bits;
1952 out:
1953 hs->hs_rehash_bits = 0;
1954 if (rc == -ESRCH) /* never be scheduled again */
1955 cfs_wi_exit(cfs_sched_rehash, wi);
1956 bsize = cfs_hash_bkt_size(hs);
1957 cfs_hash_unlock(hs, 1);
1958 /* can't refer to @hs anymore because it could be destroyed */
1959 if (bkts != NULL)
1960 cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1961 if (rc != 0)
1962 CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1963 /* return 1 only if cfs_wi_exit is called */
1964 return rc == -ESRCH;
1965 }
1966
1967 /**
1968 * Rehash the object referenced by @hnode in the libcfs hash @hs. The
1969 * @old_key must be provided to locate the object's previous location
1970 * in the hash, and the @new_key will be used to reinsert the object.
1971 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1972 * combo when it is critical that there is no window in time where the
1973 * object is missing from the hash. When an object is being rehashed
1974 * the registered cfs_hash_get() and cfs_hash_put() functions will
1975 * not be called.
1976 */
1977 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
1978 void *new_key, struct hlist_node *hnode)
1979 {
1980 struct cfs_hash_bd bds[3];
1981 struct cfs_hash_bd old_bds[2];
1982 struct cfs_hash_bd new_bd;
1983
1984 LASSERT(!hlist_unhashed(hnode));
1985
1986 cfs_hash_lock(hs, 0);
1987
1988 cfs_hash_dual_bd_get(hs, old_key, old_bds);
1989 cfs_hash_bd_get(hs, new_key, &new_bd);
1990
1991 bds[0] = old_bds[0];
1992 bds[1] = old_bds[1];
1993 bds[2] = new_bd;
1994
1995 /* NB: bds[0] and bds[1] are ordered already */
1996 cfs_hash_bd_order(&bds[1], &bds[2]);
1997 cfs_hash_bd_order(&bds[0], &bds[1]);
1998
1999 cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2000 if (likely(old_bds[1].bd_bucket == NULL)) {
2001 cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2002 } else {
2003 cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2004 cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2005 }
2006 /* overwrite the key inside the locks, otherwise it may conflict
2007 * with other operations, e.g. rehash */
2008 cfs_hash_keycpy(hs, new_key, hnode);
2009
2010 cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2011 cfs_hash_unlock(hs, 0);
2012 }
2013 EXPORT_SYMBOL(cfs_hash_rehash_key);
2014
2015 int cfs_hash_debug_header(struct seq_file *m)
2016 {
2017 return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2018 CFS_HASH_BIGNAME_LEN,
2019 "name", "cur", "min", "max", "theta", "t-min", "t-max",
2020 "flags", "rehash", "count", "maxdep", "maxdepb",
2021 " distribution");
2022 }
2023 EXPORT_SYMBOL(cfs_hash_debug_header);
2024
2025 static struct cfs_hash_bucket **
2026 cfs_hash_full_bkts(struct cfs_hash *hs)
2027 {
2028 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2029 if (hs->hs_rehash_buckets == NULL)
2030 return hs->hs_buckets;
2031
2032 LASSERT(hs->hs_rehash_bits != 0);
2033 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2034 hs->hs_rehash_buckets : hs->hs_buckets;
2035 }
2036
2037 static unsigned int
2038 cfs_hash_full_nbkt(struct cfs_hash *hs)
2039 {
2040 /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2041 if (hs->hs_rehash_buckets == NULL)
2042 return CFS_HASH_NBKT(hs);
2043
2044 LASSERT(hs->hs_rehash_bits != 0);
2045 return hs->hs_rehash_bits > hs->hs_cur_bits ?
2046 CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2047 }
2048
2049 int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2050 {
2051 int dist[8] = { 0, };
2052 int maxdep = -1;
2053 int maxdepb = -1;
2054 int total = 0;
2055 int theta;
2056 int i;
2057
2058 cfs_hash_lock(hs, 0);
2059 theta = __cfs_hash_theta(hs);
2060
2061 seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
2062 CFS_HASH_BIGNAME_LEN, hs->hs_name,
2063 1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2064 1 << hs->hs_max_bits,
2065 __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2066 __cfs_hash_theta_int(hs->hs_min_theta),
2067 __cfs_hash_theta_frac(hs->hs_min_theta),
2068 __cfs_hash_theta_int(hs->hs_max_theta),
2069 __cfs_hash_theta_frac(hs->hs_max_theta),
2070 hs->hs_flags, hs->hs_rehash_count);
2071
2072 /*
2073 * The distribution is a summary of the chained hash depth in
2074 * each of the libcfs hash buckets. Each bucket's hsb_count is
2075 * divided by the hash theta value and used to generate a
2076 * histogram of the hash distribution. A uniform hash will
2077 * result in all hash buckets being close to the average, thus
2078 * only the first few entries in the histogram will be non-zero.
2079 * If your hash function results in a non-uniform hash, this will
2080 * be observable as outlier buckets in the distribution histogram.
2081 *
2082 * Uniform hash distribution: 128/128/0/0/0/0/0/0
2083 * Non-Uniform hash distribution: 128/125/0/0/0/0/2/1
2084 */
2085 for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2086 struct cfs_hash_bd bd;
2087
2088 bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2089 cfs_hash_bd_lock(hs, &bd, 0);
2090 if (maxdep < bd.bd_bucket->hsb_depmax) {
2091 maxdep = bd.bd_bucket->hsb_depmax;
2092 maxdepb = ffz(~maxdep);
2093 }
2094 total += bd.bd_bucket->hsb_count;
2095 dist[min(__cfs_fls(bd.bd_bucket->hsb_count/max(theta,
2096 1)), 7)]++;
2097 cfs_hash_bd_unlock(hs, &bd, 0);
2098 }
2099
2100 seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2101 for (i = 0; i < 8; i++)
2102 seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
2103
2104 cfs_hash_unlock(hs, 0);
2105
2106 return 0;
2107 }
2108 EXPORT_SYMBOL(cfs_hash_debug_str);