/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hash process in lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
 * - buckets are allocated one by one (instead of contiguous memory),
 *   to avoid unnecessary cacheline conflict
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - "bucket" is a group of hlist_head now, user can specify bucket size
 *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
 *   one lock for reducing memory overhead.
 *
 * - support lockless hash, caller will take care of locks:
 *   avoid lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock/rwlock for bucket:
 *   overhead of spinlock contention is lower than read/write
 *   contention of rwlock, so using spinlock to serialize operations on
 *   bucket is more reasonable for those frequently changed hash tables
 *
 * - support one-single lock mode:
 *   one lock to protect all hash operations to avoid overhead of
 *   multiple locks if hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash element:
 *   addref & decref are atomic operations in many use-cases which
 *   are expensive.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some lustre use-cases require these functions to be strictly
 *   non-blocking, we need to schedule required rehash on a different
 *   thread on those cases.
 *
 * - safer rehash on large hash table
 *   In the old implementation, the rehash function would exclusively lock
 *   the hash table and finish the rehash in one batch; that's dangerous on
 *   an SMP system because rehashing millions of elements can take a long
 *   time. The new rehash can release the lock and relax the CPU in the
 *   middle of a rehash, so it's safe for another thread to search/change
 *   the hash table even while it's rehashing.
 *
 * - support two different refcount modes
 *   . hash table has refcount on element
 *   . hash table doesn't change refcount on adding/removing element
 *
 * - support long name hash table (for param-tree)
 *
 * - fix a bug for cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could screw up the
 *   hash-table because @key is overwritten without any protection.
 *   Now we need the user to define hs_keycpy for those rehash enabled
 *   hash tables, cfs_hash_rehash_key will overwrite the hash-key
 *   inside the lock by calling hs_keycpy.
 *
 * - better hash iteration:
 *   Now we support both locked iteration & lockless iteration of hash
 *   table. Also, user can break the iteration by return 1 in callback.
 */
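/*
 * Usage sketch (illustrative only; "my_hash_ops", "obj" and its fields are
 * hypothetical, and CFS_HASH_MIN_THETA / CFS_HASH_MAX_THETA / CFS_HASH_DEFAULT
 * are assumed from libcfs_hash.h). A caller registers cfs_hash_ops_t
 * callbacks, then creates and uses a table roughly like:
 *
 *	struct cfs_hash *hs;
 *
 *	hs = cfs_hash_create("my_hash", 5, 10, 2, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &my_hash_ops, CFS_HASH_DEFAULT);
 *	if (hs != NULL) {
 *		cfs_hash_add(hs, &obj->o_key, &obj->o_hnode);
 *		obj = cfs_hash_lookup(hs, &key);	(takes a ref via hs_get)
 *		if (obj != NULL)
 *			cfs_hash_put(hs, &obj->o_hnode);
 *		cfs_hash_putref(hs);			(last ref destroys)
 *	}
 *
 * Here 5/10/2 mean 2^5 hlist heads initially, growing up to 2^10, with
 * 2^2 heads sharing each bucket lock.
 */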
#include "../../include/linux/libcfs/libcfs.h"
#include <linux/seq_file.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif

struct cfs_wi_sched *cfs_sched_rehash;
static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
{
	spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
{
	spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
{
	if (!exclusive)
		read_lock(&lock->rw);
	else
		write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
{
	if (!exclusive)
		read_unlock(&lock->rw);
	else
		write_unlock(&lock->rw);
}
/** No lock hash */
static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_nl_lock,
	.hs_bkt_unlock	= cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
	.hs_lock	= cfs_hash_spin_lock,
	.hs_unlock	= cfs_hash_spin_unlock,
	.hs_bkt_lock	= cfs_hash_nl_lock,
	.hs_bkt_unlock	= cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
	.hs_lock	= cfs_hash_rw_lock,
	.hs_unlock	= cfs_hash_rw_unlock,
	.hs_bkt_lock	= cfs_hash_spin_lock,
	.hs_bkt_unlock	= cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
	.hs_lock	= cfs_hash_rw_lock,
	.hs_unlock	= cfs_hash_rw_unlock,
	.hs_bkt_lock	= cfs_hash_rw_lock,
	.hs_bkt_unlock	= cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_spin_lock,
	.hs_bkt_unlock	= cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_rw_lock,
	.hs_bkt_unlock	= cfs_hash_rw_unlock,
};
static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs)) {
		hs->hs_lops = &cfs_hash_nl_lops;

	} else if (cfs_hash_with_no_bktlock(hs)) {
		hs->hs_lops = &cfs_hash_nbl_lops;
		spin_lock_init(&hs->hs_lock.spin);

	} else if (cfs_hash_with_rehash(hs)) {
		rwlock_init(&hs->hs_lock.rw);

		if (cfs_hash_with_rw_bktlock(hs))
			hs->hs_lops = &cfs_hash_bkt_rw_lops;
		else if (cfs_hash_with_spin_bktlock(hs))
			hs->hs_lops = &cfs_hash_bkt_spin_lops;
		else
			LBUG();
	} else {
		if (cfs_hash_with_rw_bktlock(hs))
			hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
		else if (cfs_hash_with_spin_bktlock(hs))
			hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
		else
			LBUG();
	}
}
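/*
 * Summary of the flag -> lock-ops mapping chosen above (a reading aid
 * derived from the tables in this file, not from external documentation):
 *
 *	CFS_HASH_NO_LOCK	table: none,	bucket: none
 *	CFS_HASH_NO_BKTLOCK	table: spin,	bucket: none
 *	rehash + spin bkts	table: rwlock,	bucket: spin
 *	rehash + rw bkts	table: rwlock,	bucket: rwlock
 *	no rehash + spin bkts	table: none,	bucket: spin
 *	no rehash + rw bkts	table: none,	bucket: rwlock
 */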
/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
typedef struct {
	struct hlist_head	hh_head;	/**< entries list */
} cfs_hash_head_t;

static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
	return sizeof(cfs_hash_head_t);
}

static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];

	return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
	return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	hlist_del_init(hnode);
	return -1; /* unknown depth */
}
/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
typedef struct {
	struct hlist_head	hd_head;	/**< entries list */
	unsigned int		hd_depth;	/**< list length */
} cfs_hash_head_dep_t;

static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
	return sizeof(cfs_hash_head_dep_t);
}

static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	cfs_hash_head_dep_t *head;

	head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
					       cfs_hash_head_dep_t, hd_head);
	hlist_add_head(hnode, &hh->hd_head);
	return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
					       cfs_hash_head_dep_t, hd_head);
	hlist_del_init(hnode);
	return --hh->hd_depth;
}
/**
 * double links hash head without depth tracking
 * new element is always added to tail of hlist
 */
typedef struct {
	struct hlist_head	dh_head;	/**< entries list */
	struct hlist_node	*dh_tail;	/**< the last entry */
} cfs_hash_dhead_t;

static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
	return sizeof(cfs_hash_dhead_t);
}

static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	cfs_hash_dhead_t *head;

	head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
					    cfs_hash_dhead_t, dh_head);
	if (dh->dh_tail != NULL) /* not empty */
		hlist_add_behind(hnode, dh->dh_tail);
	else /* empty list */
		hlist_add_head(hnode, &dh->dh_head);
	dh->dh_tail = hnode;
	return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnd)
{
	cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
					    cfs_hash_dhead_t, dh_head);
	if (hnd->next == NULL) { /* it's the tail */
		dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
			      container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
	return -1; /* unknown depth */
}
/**
 * double links hash head with depth tracking
 * new element is always added to tail of hlist
 */
typedef struct {
	struct hlist_head	dd_head;	/**< entries list */
	struct hlist_node	*dd_tail;	/**< the last entry */
	unsigned int		dd_depth;	/**< list length */
} cfs_hash_dhead_dep_t;

static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
	return sizeof(cfs_hash_dhead_dep_t);
}

static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	cfs_hash_dhead_dep_t *head;

	head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
						cfs_hash_dhead_dep_t, dd_head);
	if (dh->dd_tail != NULL) /* not empty */
		hlist_add_behind(hnode, dh->dd_tail);
	else /* empty list */
		hlist_add_head(hnode, &dh->dd_head);
	dh->dd_tail = hnode;
	return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnd)
{
	cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
						cfs_hash_dhead_dep_t, dd_head);
	if (hnd->next == NULL) { /* it's the tail */
		dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
			      container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
	return --dh->dd_depth;
}
static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
	.hop_hhead	= cfs_hash_hh_hhead,
	.hop_hhead_size	= cfs_hash_hh_hhead_size,
	.hop_hnode_add	= cfs_hash_hh_hnode_add,
	.hop_hnode_del	= cfs_hash_hh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
	.hop_hhead	= cfs_hash_hd_hhead,
	.hop_hhead_size	= cfs_hash_hd_hhead_size,
	.hop_hnode_add	= cfs_hash_hd_hnode_add,
	.hop_hnode_del	= cfs_hash_hd_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
	.hop_hhead	= cfs_hash_dh_hhead,
	.hop_hhead_size	= cfs_hash_dh_hhead_size,
	.hop_hnode_add	= cfs_hash_dh_hnode_add,
	.hop_hnode_del	= cfs_hash_dh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
	.hop_hhead	= cfs_hash_dd_hhead,
	.hop_hhead_size	= cfs_hash_dd_hhead_size,
	.hop_hnode_add	= cfs_hash_dd_hnode_add,
	.hop_hnode_del	= cfs_hash_dd_hnode_del,
};
static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
	if (cfs_hash_with_add_tail(hs)) {
		hs->hs_hops = cfs_hash_with_depth(hs) ?
			      &cfs_hash_dd_hops : &cfs_hash_dh_hops;
	} else {
		hs->hs_hops = cfs_hash_with_depth(hs) ?
			      &cfs_hash_hd_hops : &cfs_hash_hh_hops;
	}
}
static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
		     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
	unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

	LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

	bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
	bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}
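/*
 * Worked example (a sketch; values chosen only for illustration): with
 * bits = 10 and hs_bkt_bits = 3 there are 2^10 hlist heads spread over
 * 2^(10 - 3) = 128 buckets of 2^3 heads each. For index = 300:
 *
 *	bd_bucket = bkts[300 & 127] = bkts[44]
 *	bd_offset = 300 >> 7	    = 2
 *
 * The low bits select the bucket and the high bits select the hlist head
 * inside it, so consecutive hash values land in different buckets, which
 * spreads contention across the per-bucket locks.
 */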
void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (likely(hs->hs_rehash_buckets == NULL)) {
		cfs_hash_bd_from_key(hs, hs->hs_buckets,
				     hs->hs_cur_bits, key, bd);
	} else {
		LASSERT(hs->hs_rehash_bits != 0);
		cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
				     hs->hs_rehash_bits, key, bd);
	}
}
EXPORT_SYMBOL(cfs_hash_bd_get);
static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
	if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
		return;

	bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
	if (likely(warn_on_depth == 0 ||
		   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
		return;

	spin_lock(&hs->hs_dep_lock);
	hs->hs_dep_max	= dep_cur;
	hs->hs_dep_bkt	= bd->bd_bucket->hsb_index;
	hs->hs_dep_off	= bd->bd_offset;
	hs->hs_dep_bits	= hs->hs_cur_bits;
	spin_unlock(&hs->hs_dep_lock);

	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}
void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		       struct hlist_node *hnode)
{
	int rc;

	rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
	cfs_hash_bd_dep_record(hs, bd, rc);
	bd->bd_bucket->hsb_version++;
	if (unlikely(bd->bd_bucket->hsb_version == 0))
		bd->bd_bucket->hsb_version++;
	bd->bd_bucket->hsb_count++;

	if (cfs_hash_with_counter(hs))
		atomic_inc(&hs->hs_count);
	if (!cfs_hash_with_no_itemref(hs))
		cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);
void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		       struct hlist_node *hnode)
{
	hs->hs_hops->hop_hnode_del(hs, bd, hnode);

	LASSERT(bd->bd_bucket->hsb_count > 0);
	bd->bd_bucket->hsb_count--;
	bd->bd_bucket->hsb_version++;
	if (unlikely(bd->bd_bucket->hsb_version == 0))
		bd->bd_bucket->hsb_version++;

	if (cfs_hash_with_counter(hs)) {
		LASSERT(atomic_read(&hs->hs_count) > 0);
		atomic_dec(&hs->hs_count);
	}
	if (!cfs_hash_with_no_itemref(hs))
		cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);
void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
			struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
	struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
	struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
	int rc;

	if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
		return;

	/* use hop_hnode_add/del directly, to avoid atomic & refcount ops
	 * in cfs_hash_bd_del/add_locked */
	hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
	rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
	cfs_hash_bd_dep_record(hs, bd_new, rc);

	LASSERT(obkt->hsb_count > 0);
	obkt->hsb_count--;
	obkt->hsb_version++;
	if (unlikely(obkt->hsb_version == 0))
		obkt->hsb_version++;
	nbkt->hsb_count++;
	nbkt->hsb_version++;
	if (unlikely(nbkt->hsb_version == 0))
		nbkt->hsb_version++;
}
EXPORT_SYMBOL(cfs_hash_bd_move_locked);
enum {
	/** always set, for sanity (avoid ZERO intent) */
	CFS_HS_LOOKUP_MASK_FIND	= 1 << 0,
	/** return entry with a ref */
	CFS_HS_LOOKUP_MASK_REF	= 1 << 1,
	/** add entry if not existing */
	CFS_HS_LOOKUP_MASK_ADD	= 1 << 2,
	/** delete entry, ignore other masks */
	CFS_HS_LOOKUP_MASK_DEL	= 1 << 3,
};

typedef enum cfs_hash_lookup_intent {
	/** return item w/o refcount */
	CFS_HS_LOOKUP_IT_PEEK	 = CFS_HS_LOOKUP_MASK_FIND,
	/** return item with refcount */
	CFS_HS_LOOKUP_IT_FIND	 = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_REF),
	/** return item w/o refcount if existed, otherwise add */
	CFS_HS_LOOKUP_IT_ADD	 = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_ADD),
	/** return item with refcount if existed, otherwise add */
	CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
				    CFS_HS_LOOKUP_MASK_ADD),
	/** delete if existed */
	CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_DEL)
} cfs_hash_lookup_intent_t;
static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			  const void *key, struct hlist_node *hnode,
			  cfs_hash_lookup_intent_t intent)
{
	struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
	struct hlist_node *ehnode;
	struct hlist_node *match;
	int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

	/* with this function, we can avoid a lot of useless refcount ops,
	 * which are expensive atomic operations most of the time. */
	match = intent_add ? NULL : hnode;
	hlist_for_each(ehnode, hhead) {
		if (!cfs_hash_keycmp(hs, key, ehnode))
			continue;

		if (match != NULL && match != ehnode) /* can't match */
			continue;

		/* match and ... */
		if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
			cfs_hash_bd_del_locked(hs, bd, ehnode);
			return ehnode;
		}

		/* caller wants refcount? */
		if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
			cfs_hash_get(hs, ehnode);
		return ehnode;
	}
	/* no match item */
	if (!intent_add)
		return NULL;

	LASSERT(hnode != NULL);
	cfs_hash_bd_add_locked(hs, bd, hnode);
	return hnode;
}
struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
					 CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
					 CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

struct hlist_node *
cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   const void *key, struct hlist_node *hnode,
			   int noref)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
					 CFS_HS_LOOKUP_IT_ADD |
					 (!noref * CFS_HS_LOOKUP_MASK_REF));
}
EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);

struct hlist_node *
cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   const void *key, struct hlist_node *hnode)
{
	/* hnode can be NULL, we find the first item with @key */
	return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
					 CFS_HS_LOOKUP_IT_FINDDEL);
}
EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
		       unsigned n, int excl)
{
	struct cfs_hash_bucket *prev = NULL;
	int i;

	/**
	 * bds must be ordered by bd->bd_bucket->hsb_index in ascending order.
	 * NB: it's possible that several bds point to the same bucket but
	 * have different bd::bd_offset, so we need to take care to avoid
	 * deadlock.
	 */
	cfs_hash_for_each_bd(bds, n, i) {
		if (prev == bds[i].bd_bucket)
			continue;

		LASSERT(prev == NULL ||
			prev->hsb_index < bds[i].bd_bucket->hsb_index);
		cfs_hash_bd_lock(hs, &bds[i], excl);
		prev = bds[i].bd_bucket;
	}
}

static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			 unsigned n, int excl)
{
	struct cfs_hash_bucket *prev = NULL;
	int i;

	cfs_hash_for_each_bd(bds, n, i) {
		if (prev != bds[i].bd_bucket) {
			cfs_hash_bd_unlock(hs, &bds[i], excl);
			prev = bds[i].bd_bucket;
		}
	}
}
static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				unsigned n, const void *key)
{
	struct hlist_node *ehnode;
	unsigned i;

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
						   CFS_HS_LOOKUP_IT_FIND);
		if (ehnode != NULL)
			return ehnode;
	}
	return NULL;
}
static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
				 struct cfs_hash_bd *bds, unsigned n, const void *key,
				 struct hlist_node *hnode, int noref)
{
	struct hlist_node *ehnode;
	int intent;
	unsigned i;

	LASSERT(hnode != NULL);
	intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
						   NULL, intent);
		if (ehnode != NULL)
			return ehnode;
	}

	if (i == 1) { /* only one bucket */
		cfs_hash_bd_add_locked(hs, &bds[0], hnode);
	} else {
		struct cfs_hash_bd mybd;

		cfs_hash_bd_get(hs, key, &mybd);
		cfs_hash_bd_add_locked(hs, &mybd, hnode);
	}

	return hnode;
}
static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				 unsigned n, const void *key,
				 struct hlist_node *hnode)
{
	struct hlist_node *ehnode;
	unsigned i;

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
						   CFS_HS_LOOKUP_IT_FINDDEL);
		if (ehnode != NULL)
			return ehnode;
	}
	return NULL;
}
static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
	int rc;

	if (bd2->bd_bucket == NULL)
		return;

	if (bd1->bd_bucket == NULL) {
		*bd1 = *bd2;
		bd2->bd_bucket = NULL;
		return;
	}

	rc = cfs_hash_bd_compare(bd1, bd2);
	if (rc == 0) {
		bd2->bd_bucket = NULL;

	} else if (rc > 0) { /* swap bd1 and bd2 */
		struct cfs_hash_bd tmp;

		tmp = *bd2;
		*bd2 = *bd1;
		*bd1 = tmp;
	}
}
void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
{
	/* NB: caller should hold hs_lock.rw if REHASH is set */
	cfs_hash_bd_from_key(hs, hs->hs_buckets,
			     hs->hs_cur_bits, key, &bds[0]);
	if (likely(hs->hs_rehash_buckets == NULL)) {
		/* no rehash or not rehashing */
		bds[1].bd_bucket = NULL;
		return;
	}

	LASSERT(hs->hs_rehash_bits != 0);
	cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
			     hs->hs_rehash_bits, key, &bds[1]);

	cfs_hash_bd_order(&bds[0], &bds[1]);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_get);
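/*
 * Note (explanatory, not from the original sources): while a rehash is in
 * flight a key can legitimately live in either the current table or the
 * rehash table, so lookup/add/del paths operate on the pair of bucket
 * descriptors filled in above. bds[1].bd_bucket == NULL marks the "no
 * rehash in progress" case, in which the dual helpers below degenerate to
 * single-bucket operations.
 */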
void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lock);

void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			       const void *key)
{
	return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode,
				int noref)
{
	return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
						hnode, noref);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode)
{
	return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
		      int bkt_size, int prev_size, int size)
{
	int i;

	for (i = prev_size; i < size; i++) {
		if (buckets[i] != NULL)
			LIBCFS_FREE(buckets[i], bkt_size);
	}

	LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}
/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
			 unsigned int old_size, unsigned int new_size)
{
	struct cfs_hash_bucket **new_bkts;
	int i;

	LASSERT(old_size == 0 || old_bkts != NULL);

	if (old_bkts != NULL && old_size == new_size)
		return old_bkts;

	LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
	if (new_bkts == NULL)
		return NULL;

	if (old_bkts != NULL) {
		memcpy(new_bkts, old_bkts,
		       min(old_size, new_size) * sizeof(*old_bkts));
	}

	for (i = old_size; i < new_size; i++) {
		struct hlist_head *hhead;
		struct cfs_hash_bd bd;

		LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
		if (new_bkts[i] == NULL) {
			cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
					      old_size, new_size);
			return NULL;
		}

		new_bkts[i]->hsb_index	 = i;
		new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
		new_bkts[i]->hsb_depmax	 = -1; /* unknown */
		bd.bd_bucket = new_bkts[i];
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
			INIT_HLIST_HEAD(hhead);

		if (cfs_hash_with_no_lock(hs) ||
		    cfs_hash_with_no_bktlock(hs))
			continue;

		if (cfs_hash_with_rw_bktlock(hs))
			rwlock_init(&new_bkts[i]->hsb_lock.rw);
		else if (cfs_hash_with_spin_bktlock(hs))
			spin_lock_init(&new_bkts[i]->hsb_lock.spin);
		else
			LBUG(); /* invalid use-case */
	}
	return new_bkts;
}
/**
 * Initialize new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *	     - CFS_HASH_SORT enable chained hash sort
 */
static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
	int dep;
	int bkt;
	int off;
	int bits;

	spin_lock(&hs->hs_dep_lock);
	dep  = hs->hs_dep_max;
	bkt  = hs->hs_dep_bkt;
	off  = hs->hs_dep_off;
	bits = hs->hs_dep_bits;
	spin_unlock(&hs->hs_dep_lock);

	LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
		      hs->hs_name, bits, dep, bkt, off);
	spin_lock(&hs->hs_dep_lock);
	hs->hs_dep_bits = 0; /* mark as workitem done */
	spin_unlock(&hs->hs_dep_lock);
	return 0;
}

static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
	spin_lock_init(&hs->hs_dep_lock);
	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
		return;

	spin_lock(&hs->hs_dep_lock);
	while (hs->hs_dep_bits != 0) {
		spin_unlock(&hs->hs_dep_lock);
		cond_resched();
		spin_lock(&hs->hs_dep_lock);
	}
	spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
		unsigned bkt_bits, unsigned extra_bytes,
		unsigned min_theta, unsigned max_theta,
		cfs_hash_ops_t *ops, unsigned flags)
{
	struct cfs_hash *hs;
	int len;

	CLASSERT(CFS_HASH_THETA_BITS < 15);

	LASSERT(name != NULL);
	LASSERT(ops != NULL);
	LASSERT(ops->hs_key);
	LASSERT(ops->hs_hash);
	LASSERT(ops->hs_object);
	LASSERT(ops->hs_keycmp);
	LASSERT(ops->hs_get != NULL);
	LASSERT(ops->hs_put_locked != NULL);

	if ((flags & CFS_HASH_REHASH) != 0)
		flags |= CFS_HASH_COUNTER; /* must have counter */

	LASSERT(cur_bits > 0);
	LASSERT(cur_bits >= bkt_bits);
	LASSERT(max_bits >= cur_bits && max_bits < 31);
	LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
	LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
		     (flags & CFS_HASH_NO_LOCK) == 0));
	LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
		     ops->hs_keycpy != NULL));

	len = (flags & CFS_HASH_BIGNAME) == 0 ?
	      CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
	LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
	if (hs == NULL)
		return NULL;

	strncpy(hs->hs_name, name, len);
	hs->hs_name[len - 1] = '\0';
	hs->hs_flags = flags;

	atomic_set(&hs->hs_refcount, 1);
	atomic_set(&hs->hs_count, 0);

	cfs_hash_lock_setup(hs);
	cfs_hash_hlist_setup(hs);

	hs->hs_cur_bits = (__u8)cur_bits;
	hs->hs_min_bits = (__u8)cur_bits;
	hs->hs_max_bits = (__u8)max_bits;
	hs->hs_bkt_bits = (__u8)bkt_bits;

	hs->hs_ops	   = ops;
	hs->hs_extra_bytes = extra_bytes;
	hs->hs_rehash_bits = 0;
	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
	cfs_hash_depth_wi_init(hs);

	if (cfs_hash_with_rehash(hs))
		__cfs_hash_set_theta(hs, min_theta, max_theta);

	hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
						  CFS_HASH_NBKT(hs));
	if (hs->hs_buckets != NULL)
		return hs;

	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
	return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
	struct hlist_node *hnode;
	struct hlist_node *pos;
	struct cfs_hash_bd bd;
	int i;

	LASSERT(hs != NULL);
	LASSERT(!cfs_hash_is_exiting(hs) &&
		!cfs_hash_is_iterating(hs));

	/**
	 * prohibit further rehashes, don't need any lock because
	 * I'm the only (last) one who can change it.
	 */
	hs->hs_exiting = 1;
	if (cfs_hash_with_rehash(hs))
		cfs_hash_rehash_cancel(hs);

	cfs_hash_depth_wi_cancel(hs);
	/* rehash should be done/canceled */
	LASSERT(hs->hs_buckets != NULL &&
		hs->hs_rehash_buckets == NULL);

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		LASSERT(bd.bd_bucket != NULL);
		/* no need to take this lock, just for consistent code */
		cfs_hash_bd_lock(hs, &bd, 1);

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			hlist_for_each_safe(hnode, pos, hhead) {
				LASSERTF(!cfs_hash_with_assert_empty(hs),
					 "hash %s bucket %u(%u) is not empty: %u items left\n",
					 hs->hs_name, bd.bd_bucket->hsb_index,
					 bd.bd_offset, bd.bd_bucket->hsb_count);
				/* can't assert key validity, because we
				 * can interrupt rehash */
				cfs_hash_bd_del_locked(hs, &bd, hnode);
				cfs_hash_exit(hs, hnode);
			}
		}
		LASSERT(bd.bd_bucket->hsb_count == 0);
		cfs_hash_bd_unlock(hs, &bd, 1);
		cond_resched();
	}

	LASSERT(atomic_read(&hs->hs_count) == 0);

	cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
			      0, CFS_HASH_NBKT(hs));
	i = cfs_hash_with_bigname(hs) ?
	    CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
}
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
	if (atomic_inc_not_zero(&hs->hs_refcount))
		return hs;
	return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(struct cfs_hash *hs)
{
	if (atomic_dec_and_test(&hs->hs_refcount))
		cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);
static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs) ||
	    !cfs_hash_with_rehash(hs))
		return -EOPNOTSUPP;

	if (unlikely(cfs_hash_is_exiting(hs)))
		return -ESRCH;

	if (unlikely(cfs_hash_is_rehashing(hs)))
		return -EALREADY;

	if (unlikely(cfs_hash_is_iterating(hs)))
		return -EAGAIN;

	/* XXX: need to handle case with max_theta != 2.0
	 *      and the case with min_theta != 0.5 */
	if ((hs->hs_cur_bits < hs->hs_max_bits) &&
	    (__cfs_hash_theta(hs) > hs->hs_max_theta))
		return hs->hs_cur_bits + 1;

	if (!cfs_hash_with_shrink(hs))
		return 0;

	if ((hs->hs_cur_bits > hs->hs_min_bits) &&
	    (__cfs_hash_theta(hs) < hs->hs_min_theta))
		return hs->hs_cur_bits - 1;

	return 0;
}
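/*
 * Worked example (a sketch; the formula is taken from __cfs_hash_theta in
 * libcfs_hash.h): theta is the load factor -- item count divided by the
 * number of hlist heads -- kept in fixed point with CFS_HASH_THETA_BITS
 * fractional bits:
 *
 *	theta = (hs_count << CFS_HASH_THETA_BITS) >> hs_cur_bits
 *
 * With 2000 items in a 2^10-head table, theta is about 1.95. Once it
 * exceeds hs_max_theta (2.0 when the header's CFS_HASH_MAX_THETA default
 * is used), the function above asks for hs_cur_bits + 1, i.e. a doubling
 * of the table; dropping below hs_min_theta requests a shrink.
 */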
/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
	return !cfs_hash_with_nblk_change(hs) &&
	       atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}
/**
 * Add item @hnode to libcfs hash @hs using @key. The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	struct cfs_hash_bd bd;
	int bits;

	LASSERT(hlist_unhashed(hnode));

	cfs_hash_lock(hs, 0);
	cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

	cfs_hash_key_validate(hs, key, hnode);
	cfs_hash_bd_add_locked(hs, &bd, hnode);

	cfs_hash_bd_unlock(hs, &bd, 1);

	bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);
static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
		     struct hlist_node *hnode, int noref)
{
	struct hlist_node *ehnode;
	struct cfs_hash_bd bds[2];
	int bits = 0;

	LASSERT(hlist_unhashed(hnode));

	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

	cfs_hash_key_validate(hs, key, hnode);
	ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
						 hnode, noref);
	cfs_hash_dual_bd_unlock(hs, bds, 1);

	if (ehnode == hnode) /* new item added */
		bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

	return ehnode;
}
/**
 * Add item @hnode to libcfs hash @hs using @key. The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
	       -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);

/**
 * Add item @hnode to libcfs hash @hs using @key. If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
			struct hlist_node *hnode)
{
	hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

	return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);
/**
 * Delete item @hnode from the libcfs hash @hs using @key. The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket. The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	void *obj = NULL;
	int bits = 0;
	struct cfs_hash_bd bds[2];

	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

	/* NB: do nothing if @hnode is not in hash table */
	if (hnode == NULL || !hlist_unhashed(hnode)) {
		if (bds[1].bd_bucket == NULL && hnode != NULL) {
			cfs_hash_bd_del_locked(hs, &bds[0], hnode);
		} else {
			hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
								key, hnode);
		}
	}

	if (hnode != NULL) {
		obj  = cfs_hash_object(hs, hnode);
		bits = cfs_hash_rehash_bits(hs);
	}

	cfs_hash_dual_bd_unlock(hs, bds, 1);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

	return obj;
}
EXPORT_SYMBOL(cfs_hash_del);
/**
 * Delete item given @key in libcfs hash @hs. The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs this function must be called once per key. The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
	return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);
/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash hs->hs_get() is called and the
 * matching object is returned. It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object. If the @key was not found
 * in the hash @hs NULL is returned.
 */
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
	void *obj = NULL;
	struct hlist_node *hnode;
	struct cfs_hash_bd bds[2];

	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
	if (hnode != NULL)
		obj = cfs_hash_object(hs, hnode);

	cfs_hash_dual_bd_unlock(hs, bds, 0);
	cfs_hash_unlock(hs, 0);

	return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);
static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
	LASSERT(!cfs_hash_is_exiting(hs));

	if (!cfs_hash_with_rehash(hs))
		return;
	/*
	 * NB: it's race on cfs_has_t::hs_iterating, but doesn't matter
	 * because it's just an unreliable signal to rehash-thread,
	 * rehash-thread will try to finish rehash ASAP when seeing this.
	 */
	hs->hs_iterating = 1;

	cfs_hash_lock(hs, 1);
	hs->hs_iterators++;

	/* NB: iteration is mostly called by service thread,
	 * we tend to cancel pending rehash-request, instead of
	 * blocking service thread, we will relaunch rehash request
	 * after iteration */
	if (cfs_hash_is_rehashing(hs))
		cfs_hash_rehash_cancel_locked(hs);
	cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
	int remained;
	int bits;

	if (!cfs_hash_with_rehash(hs))
		return;
	cfs_hash_lock(hs, 1);
	remained = --hs->hs_iterators;
	bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 1);
	/* NB: it's race on cfs_has_t::hs_iterating, see above */
	if (remained == 0)
		hs->hs_iterating = 0;
	if (bits > 0) {
		cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
				    CFS_HASH_LOOP_HOG);
	}
}
/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item with
 *      cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			void *data, int remove_safe)
{
	struct hlist_node *hnode;
	struct hlist_node *pos;
	struct cfs_hash_bd bd;
	__u64 count = 0;
	int excl = !!remove_safe;
	int loop = 0;
	int i;

	cfs_hash_for_each_enter(hs);

	cfs_hash_lock(hs, 0);
	LASSERT(!cfs_hash_is_rehashing(hs));

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, excl);
		if (func == NULL) { /* only glimpse size */
			count += bd.bd_bucket->hsb_count;
			cfs_hash_bd_unlock(hs, &bd, excl);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			hlist_for_each_safe(hnode, pos, hhead) {
				cfs_hash_bucket_validate(hs, &bd, hnode);
				count++;
				loop++;
				if (func(hs, &bd, hnode, data)) {
					cfs_hash_bd_unlock(hs, &bd, excl);
					goto out;
				}
			}
		}
		cfs_hash_bd_unlock(hs, &bd, excl);
		if (loop < CFS_HASH_LOOP_HOG)
			continue;
		loop = 0;
		cfs_hash_unlock(hs, 0);
		cond_resched();
		cfs_hash_lock(hs, 0);
	}
 out:
	cfs_hash_unlock(hs, 0);

	cfs_hash_for_each_exit(hs);
	return count;
}
typedef struct {
	cfs_hash_cond_opt_cb_t	func;
	void			*arg;
} cfs_hash_cond_arg_t;

static int
cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 struct hlist_node *hnode, void *data)
{
	cfs_hash_cond_arg_t *cond = data;

	if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
		cfs_hash_bd_del_locked(hs, bd, hnode);
	return 0;
}

/**
 * Delete item from the libcfs hash @hs when @func returns true.
 * The write lock is held during the loop for each bucket to prevent
 * any object from being referenced.
 */
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
	cfs_hash_cond_arg_t arg = {
		.func	= func,
		.arg	= data,
	};

	cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);
void
cfs_hash_for_each(struct cfs_hash *hs,
		  cfs_hash_for_each_cb_t func, void *data)
{
	cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(struct cfs_hash *hs,
		       cfs_hash_for_each_cb_t func, void *data)
{
	cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
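/*
 * Callback sketch for the iterators above (illustrative only; the counter
 * variable is hypothetical). The callback receives the hash, the bucket
 * descriptor and the item; returning non-zero stops the walk:
 *
 *	static int my_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			       struct hlist_node *hnode, void *data)
 *	{
 *		unsigned *nr = data;
 *
 *		(*nr)++;
 *		return 0;	(keep iterating)
 *	}
 *
 *	unsigned nr = 0;
 *	cfs_hash_for_each(hs, my_count_cb, &nr);
 *
 * Remember the bucket lock is held across the callback here, so it must
 * not sleep; use cfs_hash_for_each_nolock() when the callback can block.
 */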
static int
cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
	      struct hlist_node *hnode, void *data)
{
	*(int *)data = 0;
	return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(struct cfs_hash *hs)
{
	int empty = 1;

	cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
	return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(struct cfs_hash *hs)
{
	return cfs_hash_with_counter(hs) ?
	       atomic_read(&hs->hs_count) :
	       cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
/*
 * cfs_hash_for_each_relax:
 * Iterate the hash table and call @func on each item without
 * any lock. This function can't guarantee to finish the iteration
 * if these features are enabled:
 *
 * a. if rehash_key is enabled, an item can be moved from
 *    one bucket to another bucket
 * b. user can remove a non-zero-ref item from the hash-table,
 *    so the item can be removed from the hash-table; even worse,
 *    it's possible that the user changed the key and inserted into
 *    another hash bucket.
 * there's no way for us to finish the iteration correctly in the
 * previous two cases, so the iteration has to be stopped on change.
 */
static int
cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_node *hnode;
	struct hlist_node *tmp;
	struct cfs_hash_bd bd;
	__u32 version;
	int count = 0;
	int stop_on_change;
	int rc;
	int i;

	stop_on_change = cfs_hash_with_rehash_key(hs) ||
			 !cfs_hash_with_no_itemref(hs) ||
			 CFS_HOP(hs, put_locked) == NULL;
	cfs_hash_lock(hs, 0);
	LASSERT(!cfs_hash_is_rehashing(hs));

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 0);
		version = cfs_hash_bd_version_get(&bd);

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			for (hnode = hhead->first; hnode != NULL;) {
				cfs_hash_bucket_validate(hs, &bd, hnode);
				cfs_hash_get(hs, hnode);
				cfs_hash_bd_unlock(hs, &bd, 0);
				cfs_hash_unlock(hs, 0);

				rc = func(hs, &bd, hnode, data);
				if (stop_on_change)
					cfs_hash_put(hs, hnode);
				cond_resched();
				count++;

				cfs_hash_lock(hs, 0);
				cfs_hash_bd_lock(hs, &bd, 0);
				if (!stop_on_change) {
					tmp = hnode->next;
					cfs_hash_put_locked(hs, hnode);
					hnode = tmp;
				} else { /* bucket changed? */
					if (version !=
					    cfs_hash_bd_version_get(&bd))
						break;
					/* safe to continue because no change */
					hnode = hnode->next;
				}
				if (rc) /* callback wants to break iteration */
					break;
			}
		}
		cfs_hash_bd_unlock(hs, &bd, 0);
	}
	cfs_hash_unlock(hs, 0);

	return count;
}
int
cfs_hash_for_each_nolock(struct cfs_hash *hs,
			 cfs_hash_for_each_cb_t func, void *data)
{
	if (cfs_hash_with_no_lock(hs) ||
	    cfs_hash_with_rehash_key(hs) ||
	    !cfs_hash_with_no_itemref(hs))
		return -EOPNOTSUPP;

	if (CFS_HOP(hs, get) == NULL ||
	    (CFS_HOP(hs, put) == NULL &&
	     CFS_HOP(hs, put_locked) == NULL))
		return -EOPNOTSUPP;

	cfs_hash_for_each_enter(hs);
	cfs_hash_for_each_relax(hs, func, data);
	cfs_hash_for_each_exit(hs);

	return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_nolock);
/**
 * For each hash bucket in the libcfs hash @hs call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered callback hs->hs_put must remove the item
 * from the hash.  You may either use the cfs_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback @func, so it is
 * safe to sleep if needed.  This function will not terminate until the
 * hash is empty.  Note it is still possible to concurrently add new
 * items into the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
int
cfs_hash_for_each_empty(struct cfs_hash *hs,
			cfs_hash_for_each_cb_t func, void *data)
{
	unsigned i = 0;

	if (cfs_hash_with_no_lock(hs))
		return -EOPNOTSUPP;

	if (CFS_HOP(hs, get) == NULL ||
	    (CFS_HOP(hs, put) == NULL &&
	     CFS_HOP(hs, put_locked) == NULL))
		return -EOPNOTSUPP;

	cfs_hash_for_each_enter(hs);
	while (cfs_hash_for_each_relax(hs, func, data)) {
		CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
		       hs->hs_name, i++);
	}
	cfs_hash_for_each_exit(hs);
	return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_empty);
void
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
			cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_head *hhead;
	struct hlist_node *hnode;
	struct cfs_hash_bd bd;

	cfs_hash_for_each_enter(hs);
	cfs_hash_lock(hs, 0);
	if (hindex >= CFS_HASH_NHLIST(hs))
		goto out;

	cfs_hash_bd_index_set(hs, hindex, &bd);

	cfs_hash_bd_lock(hs, &bd, 0);
	hhead = cfs_hash_bd_hhead(hs, &bd);
	hlist_for_each(hnode, hhead) {
		if (func(hs, &bd, hnode, data))
			break;
	}
	cfs_hash_bd_unlock(hs, &bd, 0);
 out:
	cfs_hash_unlock(hs, 0);
	cfs_hash_for_each_exit(hs);
}
EXPORT_SYMBOL(cfs_hash_hlist_for_each);
/**
 * For each item in the libcfs hash @hs which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data. During the callback the bucket lock
 * is held so the callback must never sleep.
 */
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
		      cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_node *hnode;
	struct cfs_hash_bd bds[2];
	unsigned i;

	cfs_hash_lock(hs, 0);

	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

	cfs_hash_for_each_bd(bds, 2, i) {
		struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);

		hlist_for_each(hnode, hlist) {
			cfs_hash_bucket_validate(hs, &bds[i], hnode);

			if (cfs_hash_keycmp(hs, key, hnode)) {
				if (func(hs, &bds[i], hnode, data))
					break;
			}
		}
	}

	cfs_hash_dual_bd_unlock(hs, bds, 0);
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each_key);
/**
 * Rehash the libcfs hash @hs to the given @bits.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
 * flag is set in @hs the libcfs hash may be dynamically rehashed
 * during addition or removal if the hash's theta value exceeds
 * either the hs->hs_min_theta or hs->hs_max_theta values.  By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function.  The
 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
 */
void
cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
{
	int i;

	/* need hold cfs_hash_lock(hs, 1) */
	LASSERT(cfs_hash_with_rehash(hs) &&
		!cfs_hash_with_no_lock(hs));

	if (!cfs_hash_is_rehashing(hs))
		return;

	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
		hs->hs_rehash_bits = 0;
		return;
	}

	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
		cfs_hash_unlock(hs, 1);
		/* raise console warning while waiting too long */
		CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
		       "hash %s is still rehashing, rescheduled %d\n",
		       hs->hs_name, i - 1);
		cond_resched();
		cfs_hash_lock(hs, 1);
	}
}
EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);

void
cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
	cfs_hash_lock(hs, 1);
	cfs_hash_rehash_cancel_locked(hs);
	cfs_hash_unlock(hs, 1);
}
EXPORT_SYMBOL(cfs_hash_rehash_cancel);
int
cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
	int rc;

	LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));

	cfs_hash_lock(hs, 1);

	rc = cfs_hash_rehash_bits(hs);
	if (rc <= 0) {
		cfs_hash_unlock(hs, 1);
		return rc;
	}

	hs->hs_rehash_bits = rc;
	if (!do_rehash) {
		/* launch and return */
		cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
		cfs_hash_unlock(hs, 1);
		return 0;
	}

	/* rehash right now */
	cfs_hash_unlock(hs, 1);

	return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
}
EXPORT_SYMBOL(cfs_hash_rehash);
static int
cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
{
	struct cfs_hash_bd new;
	struct hlist_head *hhead;
	struct hlist_node *hnode;
	struct hlist_node *pos;
	void *key;
	int c = 0;

	/* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
	cfs_hash_bd_for_each_hlist(hs, old, hhead) {
		hlist_for_each_safe(hnode, pos, hhead) {
			key = cfs_hash_key(hs, hnode);
			LASSERT(key != NULL);
			/* Validate hnode is in the correct bucket. */
			cfs_hash_bucket_validate(hs, old, hnode);
			/*
			 * Delete from old hash bucket; move to new bucket.
			 * ops->hs_key must be defined.
			 */
			cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
					     hs->hs_rehash_bits, key, &new);
			cfs_hash_bd_move_locked(hs, old, &new, hnode);
			c++;
		}
	}

	return c;
}
static int
cfs_hash_rehash_worker(cfs_workitem_t *wi)
{
	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
	struct cfs_hash_bucket **bkts;
	struct cfs_hash_bd bd;
	unsigned int old_size;
	unsigned int new_size;
	int bsize;
	int count = 0;
	int rc = 0;
	int i;

	LASSERT(hs != NULL && cfs_hash_with_rehash(hs));

	cfs_hash_lock(hs, 0);
	LASSERT(cfs_hash_is_rehashing(hs));

	old_size = CFS_HASH_NBKT(hs);
	new_size = CFS_HASH_RH_NBKT(hs);

	cfs_hash_unlock(hs, 0);

	/*
	 * don't need hs::hs_rwlock for hs::hs_buckets,
	 * because nobody can change bkt-table except me.
	 */
	bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
					old_size, new_size);
	cfs_hash_lock(hs, 1);
	if (bkts == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	if (bkts == hs->hs_buckets) {
		bkts = NULL; /* do nothing */
		goto out;
	}

	rc = __cfs_hash_theta(hs);
	if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
		/* free the new allocated bkt-table */
		old_size = new_size;
		new_size = CFS_HASH_NBKT(hs);
		rc = -EALREADY;
		goto out;
	}

	LASSERT(hs->hs_rehash_buckets == NULL);
	hs->hs_rehash_buckets = bkts;

	rc = 0;
	cfs_hash_for_each_bucket(hs, &bd, i) {
		if (cfs_hash_is_exiting(hs)) {
			rc = -ESRCH;
			/* someone wants to destroy the hash, abort now */
			if (old_size < new_size) /* OK to free old bkt-table */
				break;
			/* it's shrinking, need free new bkt-table */
			hs->hs_rehash_buckets = NULL;
			old_size = new_size;
			new_size = CFS_HASH_NBKT(hs);
			goto out;
		}

		count += cfs_hash_rehash_bd(hs, &bd);
		if (count < CFS_HASH_LOOP_HOG ||
		    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
			continue;
		}

		count = 0;
		cfs_hash_unlock(hs, 1);
		cond_resched();
		cfs_hash_lock(hs, 1);
	}

	hs->hs_rehash_count++;

	bkts = hs->hs_buckets;
	hs->hs_buckets = hs->hs_rehash_buckets;
	hs->hs_rehash_buckets = NULL;

	hs->hs_cur_bits = hs->hs_rehash_bits;
 out:
	hs->hs_rehash_bits = 0;
	if (rc == -ESRCH) /* never be scheduled again */
		cfs_wi_exit(cfs_sched_rehash, wi);
	bsize = cfs_hash_bkt_size(hs);
	cfs_hash_unlock(hs, 1);
	/* can't refer to @hs anymore because it could be destroyed */
	if (bkts != NULL)
		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
	if (rc != 0)
		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
	/* return 1 only if cfs_wi_exit is called */
	return rc == -ESRCH;
}
/**
 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered cfs_hash_get() and cfs_hash_put() functions will
 * not be called.
 */
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
			 void *new_key, struct hlist_node *hnode)
{
	struct cfs_hash_bd bds[3];
	struct cfs_hash_bd old_bds[2];
	struct cfs_hash_bd new_bd;

	LASSERT(!hlist_unhashed(hnode));

	cfs_hash_lock(hs, 0);

	cfs_hash_dual_bd_get(hs, old_key, old_bds);
	cfs_hash_bd_get(hs, new_key, &new_bd);

	bds[0] = old_bds[0];
	bds[1] = old_bds[1];
	bds[2] = new_bd;

	/* NB: bds[0] and bds[1] are ordered already */
	cfs_hash_bd_order(&bds[1], &bds[2]);
	cfs_hash_bd_order(&bds[0], &bds[1]);

	cfs_hash_multi_bd_lock(hs, bds, 3, 1);
	if (likely(old_bds[1].bd_bucket == NULL)) {
		cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
	} else {
		cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
		cfs_hash_bd_add_locked(hs, &new_bd, hnode);
	}
	/* overwrite key inside locks, otherwise may screw up with
	 * other operations, i.e: rehash */
	cfs_hash_keycpy(hs, new_key, hnode);

	cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_rehash_key);
int cfs_hash_debug_header(struct seq_file *m)
{
	return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
			  CFS_HASH_BIGNAME_LEN,
			  "name", "cur", "min", "max", "theta", "t-min", "t-max",
			  "flags", "rehash", "count", "maxdep", "maxdepb",
			  " distribution");
}
EXPORT_SYMBOL(cfs_hash_debug_header);
static struct cfs_hash_bucket **
cfs_hash_full_bkts(struct cfs_hash *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (hs->hs_rehash_buckets == NULL)
		return hs->hs_buckets;

	LASSERT(hs->hs_rehash_bits != 0);
	return hs->hs_rehash_bits > hs->hs_cur_bits ?
	       hs->hs_rehash_buckets : hs->hs_buckets;
}

static unsigned int
cfs_hash_full_nbkt(struct cfs_hash *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (hs->hs_rehash_buckets == NULL)
		return CFS_HASH_NBKT(hs);

	LASSERT(hs->hs_rehash_bits != 0);
	return hs->hs_rehash_bits > hs->hs_cur_bits ?
	       CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}
int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
{
	int dist[8] = { 0, };
	int maxdep = -1;
	int maxdepb = -1;
	int total = 0;
	int theta;
	int i;

	cfs_hash_lock(hs, 0);
	theta = __cfs_hash_theta(hs);

	seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
		   CFS_HASH_BIGNAME_LEN, hs->hs_name,
		   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
		   1 << hs->hs_max_bits,
		   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
		   __cfs_hash_theta_int(hs->hs_min_theta),
		   __cfs_hash_theta_frac(hs->hs_min_theta),
		   __cfs_hash_theta_int(hs->hs_max_theta),
		   __cfs_hash_theta_frac(hs->hs_max_theta),
		   hs->hs_flags, hs->hs_rehash_count);

	/*
	 * The distribution is a summary of the chained hash depth in
	 * each of the libcfs hash buckets. Each bucket's hsb_count is
	 * divided by the hash theta value and used to generate a
	 * histogram of the hash distribution. A uniform hash will
	 * result in all hash buckets being close to the average, thus
	 * only the first few entries in the histogram will be non-zero.
	 * If your hash function results in a non-uniform hash, this will
	 * be observable as outlier buckets in the distribution histogram.
	 *
	 * Uniform hash distribution:		128/128/0/0/0/0/0/0
	 * Non-Uniform hash distribution:	128/125/0/0/0/0/2/1
	 */
	for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
		struct cfs_hash_bd bd;

		bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
		cfs_hash_bd_lock(hs, &bd, 0);
		if (maxdep < bd.bd_bucket->hsb_depmax) {
			maxdep	= bd.bd_bucket->hsb_depmax;
			maxdepb	= ffz(~maxdep);
		}
		total += bd.bd_bucket->hsb_count;
		dist[min(__cfs_fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
		cfs_hash_bd_unlock(hs, &bd, 0);
	}

	seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
	for (i = 0; i < 8; i++)
		seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');

	cfs_hash_unlock(hs, 0);

	return 0;
}
EXPORT_SYMBOL(cfs_hash_debug_str);