/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for hashing in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - moved everything to libcfs
 * - don't allow cur_bits != max_bits without setting CFS_HASH_REHASH
 * - ignore hs_rwlock if CFS_HASH_REHASH is not set
 * - buckets are allocated one by one (instead of as contiguous memory),
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - a "bucket" is now a group of hlist_heads; the user can specify the
 *   bucket size via bkt_bits of cfs_hash_create(), and all hlist_heads
 *   in a bucket share one lock to reduce memory overhead
 *
 * - support lockless hash, where the caller takes care of locks:
 *   avoids lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spinlock and rwlock for a bucket:
 *   the overhead of spinlock contention is lower than the read/write
 *   contention of an rwlock, so using a spinlock to serialize operations
 *   on a bucket is more reasonable for frequently changed hash tables
 *
 * - support single-lock mode:
 *   one lock protects all hash operations, to avoid the overhead of
 *   multiple locks when the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations in many use-cases, which
 *   is expensive
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking, so we schedule any required rehash on a different
 *   thread in those cases
 *
 * - safer rehash on large hash tables:
 *   in the old implementation, the rehash function would exclusively
 *   lock the hash table and finish the rehash in one batch; that is
 *   dangerous on SMP systems because rehashing millions of elements
 *   can take a long time.  The new rehash can release the lock and
 *   relax the CPU in the middle of a rehash, so it is safe for other
 *   threads to search/change the hash table even while it is rehashing
 *
 * - support two different refcount modes:
 *   . the hash table holds a refcount on each element
 *   . the hash table doesn't change the refcount on adding/removing
 *     elements
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash tables;
 *   cfs_hash_rehash_key overwrites the hash key inside the lock by
 *   calling hs_keycpy
 *
 * - better hash iteration:
 *   we now support both locked iteration & lockless iteration of the
 *   hash table; also, the user can break the iteration by returning 1
 *   from the callback (see the sketch below)
 */
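/*
 * Illustrative sketch (not part of this file): breaking an iteration from
 * a callback, per the last changelog entry above.  my_count_cb and the
 * 8-item limit are hypothetical; only the cfs_hash_* names are real.
 *
 *	static int my_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *			       struct hlist_node *hnode, void *data)
 *	{
 *		int *seen = data;
 *
 *		if (++(*seen) >= 8)
 *			return 1;	returning 1 stops the iteration
 *		return 0;		returning 0 keeps going
 *	}
 *
 *	int seen = 0;
 *	cfs_hash_for_each(hs, my_count_cb, &seen);
 */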
#include <linux/seq_file.h>
#include <linux/log2.h>

#include "../../include/linux/libcfs/libcfs.h"

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif

struct cfs_wi_sched *cfs_sched_rehash;
static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
	__acquires(&lock->spin)
{
	spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
	__releases(&lock->spin)
{
	spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
	__acquires(&lock->rw)
{
	if (!exclusive)
		read_lock(&lock->rw);
	else
		write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
	__releases(&lock->rw)
{
	if (!exclusive)
		read_unlock(&lock->rw);
	else
		write_unlock(&lock->rw);
}

/** No lock hash */
static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_nl_lock,
	.hs_bkt_unlock	= cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
	.hs_lock	= cfs_hash_spin_lock,
	.hs_unlock	= cfs_hash_spin_unlock,
	.hs_bkt_lock	= cfs_hash_nl_lock,
	.hs_bkt_unlock	= cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
	.hs_lock	= cfs_hash_rw_lock,
	.hs_unlock	= cfs_hash_rw_unlock,
	.hs_bkt_lock	= cfs_hash_spin_lock,
	.hs_bkt_unlock	= cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
	.hs_lock	= cfs_hash_rw_lock,
	.hs_unlock	= cfs_hash_rw_unlock,
	.hs_bkt_lock	= cfs_hash_rw_lock,
	.hs_bkt_unlock	= cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_spin_lock,
	.hs_bkt_unlock	= cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_rw_lock,
	.hs_bkt_unlock	= cfs_hash_rw_unlock,
};
static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs)) {
		hs->hs_lops = &cfs_hash_nl_lops;

	} else if (cfs_hash_with_no_bktlock(hs)) {
		hs->hs_lops = &cfs_hash_nbl_lops;
		spin_lock_init(&hs->hs_lock.spin);

	} else if (cfs_hash_with_rehash(hs)) {
		rwlock_init(&hs->hs_lock.rw);

		if (cfs_hash_with_rw_bktlock(hs))
			hs->hs_lops = &cfs_hash_bkt_rw_lops;
		else if (cfs_hash_with_spin_bktlock(hs))
			hs->hs_lops = &cfs_hash_bkt_spin_lops;
		else
			LBUG();
	} else {
		if (cfs_hash_with_rw_bktlock(hs))
			hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
		else if (cfs_hash_with_spin_bktlock(hs))
			hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
		else
			LBUG();
	}
}
/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head {
	struct hlist_head	hh_head;	/**< entries list */
};

static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
	return sizeof(struct cfs_hash_head);
}

static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	struct cfs_hash_head *head;

	head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
	return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	hlist_del_init(hnode);
	return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
struct cfs_hash_head_dep {
	struct hlist_head	hd_head;	/**< entries list */
	unsigned int		hd_depth;	/**< list length */
};

static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
	return sizeof(struct cfs_hash_head_dep);
}

static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	struct cfs_hash_head_dep *head;

	head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	struct cfs_hash_head_dep *hh;

	hh = container_of(cfs_hash_hd_hhead(hs, bd),
			  struct cfs_hash_head_dep, hd_head);
	hlist_add_head(hnode, &hh->hd_head);
	return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	struct cfs_hash_head_dep *hh;

	hh = container_of(cfs_hash_hd_hhead(hs, bd),
			  struct cfs_hash_head_dep, hd_head);
	hlist_del_init(hnode);
	return --hh->hd_depth;
}
/**
 * double links hash head without depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead {
	struct hlist_head	dh_head;	/**< entries list */
	struct hlist_node	*dh_tail;	/**< the last entry */
};

static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
	return sizeof(struct cfs_hash_dhead);
}

static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	struct cfs_hash_dhead *head;

	head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	struct cfs_hash_dhead *dh;

	dh = container_of(cfs_hash_dh_hhead(hs, bd),
			  struct cfs_hash_dhead, dh_head);
	if (dh->dh_tail)	/* not empty */
		hlist_add_behind(hnode, dh->dh_tail);
	else			/* empty list */
		hlist_add_head(hnode, &dh->dh_head);
	dh->dh_tail = hnode;
	return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnd)
{
	struct cfs_hash_dhead *dh;

	dh = container_of(cfs_hash_dh_hhead(hs, bd),
			  struct cfs_hash_dhead, dh_head);
	if (!hnd->next) { /* it's the tail */
		dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
			      container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
	return -1; /* unknown depth */
}
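/*
 * Note on the tail update above (mirrored in cfs_hash_dd_hnode_del
 * below): when the deleted node is the tail, its pprev points either at
 * the list head's ->first (the list is becoming empty, so the new tail
 * is NULL) or at the predecessor's ->next field, from which
 * container_of() recovers the predecessor node.  A hypothetical
 * three-node chain makes this concrete:
 *
 *	head.first -> A -> B -> C (tail)
 *	deleting C: C->pprev == &B.next, so the new tail is B
 *	deleting a lone node A: A->pprev == &head.first, so tail = NULL
 */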
/**
 * double links hash head with depth tracking
 * new element is always added to tail of hlist
 */
struct cfs_hash_dhead_dep {
	struct hlist_head	dd_head;	/**< entries list */
	struct hlist_node	*dd_tail;	/**< the last entry */
	unsigned int		dd_depth;	/**< list length */
};

static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
	return sizeof(struct cfs_hash_dhead_dep);
}

static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	struct cfs_hash_dhead_dep *head;

	head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	struct cfs_hash_dhead_dep *dh;

	dh = container_of(cfs_hash_dd_hhead(hs, bd),
			  struct cfs_hash_dhead_dep, dd_head);
	if (dh->dd_tail)	/* not empty */
		hlist_add_behind(hnode, dh->dd_tail);
	else			/* empty list */
		hlist_add_head(hnode, &dh->dd_head);
	dh->dd_tail = hnode;
	return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnd)
{
	struct cfs_hash_dhead_dep *dh;

	dh = container_of(cfs_hash_dd_hhead(hs, bd),
			  struct cfs_hash_dhead_dep, dd_head);
	if (!hnd->next) { /* it's the tail */
		dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
			      container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
	return --dh->dd_depth;
}
static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
	.hop_hhead	= cfs_hash_hh_hhead,
	.hop_hhead_size	= cfs_hash_hh_hhead_size,
	.hop_hnode_add	= cfs_hash_hh_hnode_add,
	.hop_hnode_del	= cfs_hash_hh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
	.hop_hhead	= cfs_hash_hd_hhead,
	.hop_hhead_size	= cfs_hash_hd_hhead_size,
	.hop_hnode_add	= cfs_hash_hd_hnode_add,
	.hop_hnode_del	= cfs_hash_hd_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
	.hop_hhead	= cfs_hash_dh_hhead,
	.hop_hhead_size	= cfs_hash_dh_hhead_size,
	.hop_hnode_add	= cfs_hash_dh_hnode_add,
	.hop_hnode_del	= cfs_hash_dh_hnode_del,
};

static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
	.hop_hhead	= cfs_hash_dd_hhead,
	.hop_hhead_size	= cfs_hash_dd_hhead_size,
	.hop_hnode_add	= cfs_hash_dd_hnode_add,
	.hop_hnode_del	= cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
	if (cfs_hash_with_add_tail(hs)) {
		hs->hs_hops = cfs_hash_with_depth(hs) ?
			      &cfs_hash_dd_hops : &cfs_hash_dh_hops;
	} else {
		hs->hs_hops = cfs_hash_with_depth(hs) ?
			      &cfs_hash_hd_hops : &cfs_hash_hh_hops;
	}
}
static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
		     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
	unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

	LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

	bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
	bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}
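/*
 * Worked example (illustrative): with bits = 10 and hs_bkt_bits = 3 there
 * are 2^(10-3) = 128 buckets, each containing 2^3 = 8 hlist heads, for
 * 1024 hash slots in total.  A hash index in [0, 1023] is then split as
 *
 *	bd->bd_bucket = bkts[index & 127];	(low 7 bits: which bucket)
 *	bd->bd_offset = index >> 7;		(high 3 bits: which hlist)
 *
 * so consecutive hash indices land in different buckets, spreading work
 * across the per-bucket locks.
 */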
void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (likely(!hs->hs_rehash_buckets)) {
		cfs_hash_bd_from_key(hs, hs->hs_buckets,
				     hs->hs_cur_bits, key, bd);
	} else {
		LASSERT(hs->hs_rehash_bits != 0);
		cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
				     hs->hs_rehash_bits, key, bd);
	}
}
EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
	if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
		return;

	bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
	if (likely(warn_on_depth == 0 ||
		   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
		return;

	spin_lock(&hs->hs_dep_lock);
	hs->hs_dep_max	= dep_cur;
	hs->hs_dep_bkt	= bd->bd_bucket->hsb_index;
	hs->hs_dep_off	= bd->bd_offset;
	hs->hs_dep_bits	= hs->hs_cur_bits;
	spin_unlock(&hs->hs_dep_lock);

	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}
void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		       struct hlist_node *hnode)
{
	int rc;

	rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
	cfs_hash_bd_dep_record(hs, bd, rc);
	bd->bd_bucket->hsb_version++;
	if (unlikely(bd->bd_bucket->hsb_version == 0))
		bd->bd_bucket->hsb_version++;
	bd->bd_bucket->hsb_count++;

	if (cfs_hash_with_counter(hs))
		atomic_inc(&hs->hs_count);
	if (!cfs_hash_with_no_itemref(hs))
		cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		       struct hlist_node *hnode)
{
	hs->hs_hops->hop_hnode_del(hs, bd, hnode);

	LASSERT(bd->bd_bucket->hsb_count > 0);
	bd->bd_bucket->hsb_count--;
	bd->bd_bucket->hsb_version++;
	if (unlikely(bd->bd_bucket->hsb_version == 0))
		bd->bd_bucket->hsb_version++;

	if (cfs_hash_with_counter(hs)) {
		LASSERT(atomic_read(&hs->hs_count) > 0);
		atomic_dec(&hs->hs_count);
	}
	if (!cfs_hash_with_no_itemref(hs))
		cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);
static void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
			struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
	struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
	struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
	int rc;

	if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
		return;

	/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
	 * in cfs_hash_bd_del/add_locked
	 */
	hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
	rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
	cfs_hash_bd_dep_record(hs, bd_new, rc);

	LASSERT(obkt->hsb_count > 0);
	obkt->hsb_count--;
	obkt->hsb_version++;
	if (unlikely(obkt->hsb_version == 0))
		obkt->hsb_version++;
	nbkt->hsb_count++;
	nbkt->hsb_version++;
	if (unlikely(nbkt->hsb_version == 0))
		nbkt->hsb_version++;
}
enum {
	/** always set, for sanity (avoid ZERO intent) */
	CFS_HS_LOOKUP_MASK_FIND	= BIT(0),
	/** return entry with a ref */
	CFS_HS_LOOKUP_MASK_REF	= BIT(1),
	/** add entry if not existing */
	CFS_HS_LOOKUP_MASK_ADD	= BIT(2),
	/** delete entry, ignore other masks */
	CFS_HS_LOOKUP_MASK_DEL	= BIT(3),
};

enum cfs_hash_lookup_intent {
	/** return item w/o refcount */
	CFS_HS_LOOKUP_IT_PEEK	 = CFS_HS_LOOKUP_MASK_FIND,
	/** return item with refcount */
	CFS_HS_LOOKUP_IT_FIND	 = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_REF),
	/** return item w/o refcount if existed, otherwise add */
	CFS_HS_LOOKUP_IT_ADD	 = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_ADD),
	/** return item with refcount if existed, otherwise add */
	CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
				    CFS_HS_LOOKUP_MASK_ADD),
	/** delete if existed */
	CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_DEL)
};
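/*
 * Bit view of the intents above (FIND = 0x1, REF = 0x2, ADD = 0x4,
 * DEL = 0x8), spelled out for illustration:
 *
 *	PEEK	= 0x1			find, take no ref
 *	FIND	= 0x1|0x2 = 0x3		find and take a ref
 *	ADD	= 0x1|0x4 = 0x5		find w/o ref, add if missing
 *	FINDADD	= 0x3|0x4 = 0x7		find with ref, add if missing
 *	FINDDEL	= 0x1|0x8 = 0x9		find and delete (DEL overrides others)
 */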
static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			  const void *key, struct hlist_node *hnode,
			  enum cfs_hash_lookup_intent intent)
{
	struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
	struct hlist_node *ehnode;
	struct hlist_node *match;
	int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

	/* with this function, we can avoid a lot of useless refcount ops,
	 * which are expensive atomic operations most of the time.
	 */
	match = intent_add ? NULL : hnode;
	hlist_for_each(ehnode, hhead) {
		if (!cfs_hash_keycmp(hs, key, ehnode))
			continue;

		if (match && match != ehnode) /* can't match */
			continue;

		/* match and ... */
		if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
			cfs_hash_bd_del_locked(hs, bd, ehnode);
			return ehnode;
		}

		/* caller wants refcount? */
		if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
			cfs_hash_get(hs, ehnode);
		return ehnode;
	}
	/* no match item */
	if (!intent_add)
		return NULL;

	LASSERT(hnode);
	cfs_hash_bd_add_locked(hs, bd, hnode);
	return hnode;
}
struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			  const void *key)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
					 CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			const void *key)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
					 CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
		       unsigned n, int excl)
{
	struct cfs_hash_bucket *prev = NULL;
	int i;

	/**
	 * bds must be ordered by ascending bd->bd_bucket->hsb_index.
	 * NB: it's possible that several bds point to the same bucket but
	 * have different bd::bd_offset, so we must take care to avoid
	 * double-locking (deadlock).
	 */
	cfs_hash_for_each_bd(bds, n, i) {
		if (prev == bds[i].bd_bucket)
			continue;

		LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index);
		cfs_hash_bd_lock(hs, &bds[i], excl);
		prev = bds[i].bd_bucket;
	}
}
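/*
 * Illustrative scenario for the skip above: in cfs_hash_rehash_key() the
 * old-key bd and the new-key bd may resolve to the same bucket (with
 * different bd_offset); locking both blindly would self-deadlock on that
 * bucket's lock, so duplicates are locked only once.  The ascending
 * hsb_index order also ensures any two threads locking overlapping bd
 * sets acquire the bucket locks in the same order, preventing ABBA
 * deadlocks.
 */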
static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			 unsigned n, int excl)
{
	struct cfs_hash_bucket *prev = NULL;
	int i;

	cfs_hash_for_each_bd(bds, n, i) {
		if (prev != bds[i].bd_bucket) {
			cfs_hash_bd_unlock(hs, &bds[i], excl);
			prev = bds[i].bd_bucket;
		}
	}
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				unsigned n, const void *key)
{
	struct hlist_node *ehnode;
	unsigned i;

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
						   CFS_HS_LOOKUP_IT_FIND);
		if (ehnode)
			return ehnode;
	}
	return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				 unsigned n, const void *key,
				 struct hlist_node *hnode, int noref)
{
	struct hlist_node *ehnode;
	unsigned i;
	int intent;

	LASSERT(hnode);
	intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
						   NULL, intent);
		if (ehnode)
			return ehnode;
	}

	if (i == 1) { /* only one bucket */
		cfs_hash_bd_add_locked(hs, &bds[0], hnode);
	} else {
		struct cfs_hash_bd mybd;

		cfs_hash_bd_get(hs, key, &mybd);
		cfs_hash_bd_add_locked(hs, &mybd, hnode);
	}

	return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				 unsigned n, const void *key,
				 struct hlist_node *hnode)
{
	struct hlist_node *ehnode;
	unsigned i;

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
						   CFS_HS_LOOKUP_IT_FINDDEL);
		if (ehnode)
			return ehnode;
	}
	return NULL;
}
static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
	int rc;

	if (!bd2->bd_bucket)
		return;

	if (!bd1->bd_bucket) {
		*bd1 = *bd2;
		bd2->bd_bucket = NULL;
		return;
	}

	rc = cfs_hash_bd_compare(bd1, bd2);
	if (rc == 0) {
		bd2->bd_bucket = NULL;
	} else if (rc > 0) {
		swap(*bd1, *bd2); /* swap bd1 and bd2 */
	}
}

void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
		     struct cfs_hash_bd *bds)
{
	/* NB: caller should hold hs_lock.rw if REHASH is set */
	cfs_hash_bd_from_key(hs, hs->hs_buckets,
			     hs->hs_cur_bits, key, &bds[0]);
	if (likely(!hs->hs_rehash_buckets)) {
		/* no rehash or not rehashing */
		bds[1].bd_bucket = NULL;
		return;
	}

	LASSERT(hs->hs_rehash_bits != 0);
	cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
			     hs->hs_rehash_bits, key, &bds[1]);

	cfs_hash_bd_order(&bds[0], &bds[1]);
}

void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}

void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			       const void *key)
{
	return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode,
				int noref)
{
	return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
						hnode, noref);
}

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode)
{
	return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
		      int bkt_size, int prev_size, int size)
{
	int i;

	for (i = prev_size; i < size; i++) {
		if (buckets[i])
			LIBCFS_FREE(buckets[i], bkt_size);
	}

	LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
			 unsigned int old_size, unsigned int new_size)
{
	struct cfs_hash_bucket **new_bkts;
	int i;

	LASSERT(old_size == 0 || old_bkts);

	if (old_bkts && old_size == new_size)
		return old_bkts;

	LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
	if (!new_bkts)
		return NULL;

	if (old_bkts) {
		memcpy(new_bkts, old_bkts,
		       min(old_size, new_size) * sizeof(*old_bkts));
	}

	for (i = old_size; i < new_size; i++) {
		struct hlist_head *hhead;
		struct cfs_hash_bd bd;

		LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
		if (!new_bkts[i]) {
			cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
					      old_size, new_size);
			return NULL;
		}

		new_bkts[i]->hsb_index = i;
		new_bkts[i]->hsb_version = 1;	/* shouldn't be zero */
		new_bkts[i]->hsb_depmax = -1;	/* unknown */
		bd.bd_bucket = new_bkts[i];
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
			INIT_HLIST_HEAD(hhead);

		if (cfs_hash_with_no_lock(hs) ||
		    cfs_hash_with_no_bktlock(hs))
			continue;

		if (cfs_hash_with_rw_bktlock(hs))
			rwlock_init(&new_bkts[i]->hsb_lock.rw);
		else if (cfs_hash_with_spin_bktlock(hs))
			spin_lock_init(&new_bkts[i]->hsb_lock.spin);
		else
			LBUG(); /* invalid use-case */
	}
	return new_bkts;
}
/**
 * Initialize new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *	     - CFS_HASH_SORT enable chained hash sort
 */
static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(struct cfs_workitem *wi)
{
	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
	int dep;
	int bkt;
	int off;
	int bits;

	spin_lock(&hs->hs_dep_lock);
	dep = hs->hs_dep_max;
	bkt = hs->hs_dep_bkt;
	off = hs->hs_dep_off;
	bits = hs->hs_dep_bits;
	spin_unlock(&hs->hs_dep_lock);

	LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
		      hs->hs_name, bits, dep, bkt, off);
	spin_lock(&hs->hs_dep_lock);
	hs->hs_dep_bits = 0; /* mark as workitem done */
	spin_unlock(&hs->hs_dep_lock);
	return 0;
}

static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
	spin_lock_init(&hs->hs_dep_lock);
	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
		return;

	spin_lock(&hs->hs_dep_lock);
	while (hs->hs_dep_bits != 0) {
		spin_unlock(&hs->hs_dep_lock);
		cond_resched();
		spin_lock(&hs->hs_dep_lock);
	}
	spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
		unsigned bkt_bits, unsigned extra_bytes,
		unsigned min_theta, unsigned max_theta,
		struct cfs_hash_ops *ops, unsigned flags)
{
	struct cfs_hash *hs;
	int len;

	CLASSERT(CFS_HASH_THETA_BITS < 15);

	LASSERT(name);
	LASSERT(ops->hs_key);
	LASSERT(ops->hs_hash);
	LASSERT(ops->hs_object);
	LASSERT(ops->hs_keycmp);
	LASSERT(ops->hs_get);
	LASSERT(ops->hs_put_locked);

	if ((flags & CFS_HASH_REHASH) != 0)
		flags |= CFS_HASH_COUNTER; /* must have counter */

	LASSERT(cur_bits > 0);
	LASSERT(cur_bits >= bkt_bits);
	LASSERT(max_bits >= cur_bits && max_bits < 31);
	LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
	LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
		     (flags & CFS_HASH_NO_LOCK) == 0));
	LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));

	len = (flags & CFS_HASH_BIGNAME) == 0 ?
	      CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
	LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
	if (!hs)
		return NULL;

	strlcpy(hs->hs_name, name, len);
	hs->hs_flags = flags;

	atomic_set(&hs->hs_refcount, 1);
	atomic_set(&hs->hs_count, 0);

	cfs_hash_lock_setup(hs);
	cfs_hash_hlist_setup(hs);

	hs->hs_cur_bits = (__u8)cur_bits;
	hs->hs_min_bits = (__u8)cur_bits;
	hs->hs_max_bits = (__u8)max_bits;
	hs->hs_bkt_bits = (__u8)bkt_bits;

	hs->hs_ops = ops;
	hs->hs_extra_bytes = extra_bytes;
	hs->hs_rehash_bits = 0;
	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
	cfs_hash_depth_wi_init(hs);

	if (cfs_hash_with_rehash(hs))
		__cfs_hash_set_theta(hs, min_theta, max_theta);

	hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
						  CFS_HASH_NBKT(hs));
	if (hs->hs_buckets)
		return hs;

	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
	return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
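/*
 * Usage sketch (an illustration, not code from this file): a minimal
 * caller with a hypothetical element type.  The callbacks listed are the
 * ones the LASSERTs above require; CFS_HASH_DEFAULT and the theta
 * constants are assumed to come from libcfs_hash.h.
 *
 *	struct my_obj {
 *		u64			mo_key;
 *		struct hlist_node	mo_hnode;
 *		atomic_t		mo_ref;
 *	};
 *
 *	static struct cfs_hash_ops my_hash_ops = {
 *		.hs_hash	= my_obj_hashfn,
 *		.hs_key		= my_obj_key,
 *		.hs_keycmp	= my_obj_keycmp,
 *		.hs_object	= my_obj_object,
 *		.hs_get		= my_obj_get,
 *		.hs_put_locked	= my_obj_put_locked,
 *	};
 *
 *	hs = cfs_hash_create("my_hash", 5, 15, 3, 0,
 *			     CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *			     &my_hash_ops, CFS_HASH_DEFAULT);
 *
 * which asks for 2^5 hash slots initially, growth up to 2^15 slots,
 * 2^3 hlist heads per bucket lock, and no extra bytes per element.
 */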
/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
	struct hlist_node *hnode;
	struct hlist_node *pos;
	struct cfs_hash_bd bd;
	int i;

	LASSERT(hs);
	LASSERT(!cfs_hash_is_exiting(hs) &&
		!cfs_hash_is_iterating(hs));

	/**
	 * prohibit further rehashes; no lock is needed because
	 * I'm the only (last) one who can change it.
	 */
	hs->hs_exiting = 1;
	if (cfs_hash_with_rehash(hs))
		cfs_hash_rehash_cancel(hs);

	cfs_hash_depth_wi_cancel(hs);
	/* rehash should be done/canceled */
	LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets);

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		LASSERT(bd.bd_bucket);
		/* no need to take this lock, just for consistent code */
		cfs_hash_bd_lock(hs, &bd, 1);

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			hlist_for_each_safe(hnode, pos, hhead) {
				LASSERTF(!cfs_hash_with_assert_empty(hs),
					 "hash %s bucket %u(%u) is not empty: %u items left\n",
					 hs->hs_name, bd.bd_bucket->hsb_index,
					 bd.bd_offset, bd.bd_bucket->hsb_count);
				/* can't assert key validity, because
				 * rehash can be interrupted
				 */
				cfs_hash_bd_del_locked(hs, &bd, hnode);
				cfs_hash_exit(hs, hnode);
			}
		}
		LASSERT(bd.bd_bucket->hsb_count == 0);
		cfs_hash_bd_unlock(hs, &bd, 1);
		cond_resched();
	}

	LASSERT(atomic_read(&hs->hs_count) == 0);

	cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
			      0, CFS_HASH_NBKT(hs));
	i = cfs_hash_with_bigname(hs) ?
	    CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
}
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
	if (atomic_inc_not_zero(&hs->hs_refcount))
		return hs;
	return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(struct cfs_hash *hs)
{
	if (atomic_dec_and_test(&hs->hs_refcount))
		cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);
static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs) ||
	    !cfs_hash_with_rehash(hs))
		return -EOPNOTSUPP;

	if (unlikely(cfs_hash_is_exiting(hs)))
		return -ESRCH;

	if (unlikely(cfs_hash_is_rehashing(hs)))
		return -EALREADY;

	if (unlikely(cfs_hash_is_iterating(hs)))
		return -EAGAIN;

	/* XXX: need to handle case with max_theta != 2.0
	 *	and the case with min_theta != 0.5
	 */
	if ((hs->hs_cur_bits < hs->hs_max_bits) &&
	    (__cfs_hash_theta(hs) > hs->hs_max_theta))
		return hs->hs_cur_bits + 1;

	if (!cfs_hash_with_shrink(hs))
		return 0;

	if ((hs->hs_cur_bits > hs->hs_min_bits) &&
	    (__cfs_hash_theta(hs) < hs->hs_min_theta))
		return hs->hs_cur_bits - 1;

	return 0;
}
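/*
 * Worked example (a sketch; assumes the default thresholds of
 * min_theta = 0.5 and max_theta = 2.0, where theta is the average chain
 * length hs_count / 2^hs_cur_bits kept in fixed point): with
 * hs_cur_bits = 10 (1024 slots) and 3000 items, theta ~= 2.9 > 2.0, so
 * this returns 11 and the table doubles; with 400 items, theta ~= 0.39
 * < 0.5 and a table created with shrink support returns 9.
 */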
/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
	return !cfs_hash_with_nblk_change(hs) &&
	       atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}
/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	struct cfs_hash_bd bd;
	int bits;

	LASSERT(hlist_unhashed(hnode));

	cfs_hash_lock(hs, 0);
	cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

	cfs_hash_key_validate(hs, key, hnode);
	cfs_hash_bd_add_locked(hs, &bd, hnode);

	cfs_hash_bd_unlock(hs, &bd, 1);

	bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
		     struct hlist_node *hnode, int noref)
{
	struct hlist_node *ehnode;
	struct cfs_hash_bd bds[2];
	int bits = 0;

	LASSERT(hlist_unhashed(hnode));

	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

	cfs_hash_key_validate(hs, key, hnode);
	ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
						 hnode, noref);
	cfs_hash_dual_bd_unlock(hs, bds, 1);

	if (ehnode == hnode)	/* new item added */
		bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

	return ehnode;
}
/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
		    struct hlist_node *hnode)
{
	return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
	       -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);
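/*
 * Caller-side sketch (illustrative; my_obj is a hypothetical type):
 *
 *	rc = cfs_hash_add_unique(hs, &obj->mo_key, &obj->mo_hnode);
 *
 * A return of -EALREADY means another thread inserted this key first:
 * @obj was not added and the hash holds no reference on it, so the
 * caller may free it or retry with a lookup.
 */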
/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
			struct hlist_node *hnode)
{
	hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

	return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);
/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	void *obj = NULL;
	int bits = 0;
	struct cfs_hash_bd bds[2];

	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

	/* NB: do nothing if @hnode is not in hash table */
	if (!hnode || !hlist_unhashed(hnode)) {
		if (!bds[1].bd_bucket && hnode) {
			cfs_hash_bd_del_locked(hs, &bds[0], hnode);
		} else {
			hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
								key, hnode);
		}
	}

	if (hnode) {
		obj = cfs_hash_object(hs, hnode);
		bits = cfs_hash_rehash_bits(hs);
	}

	cfs_hash_dual_bd_unlock(hs, bds, 1);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

	return obj;
}
EXPORT_SYMBOL(cfs_hash_del);
/**
 * Delete item given @key in libcfs hash @hs.  The first @key found in
 * the hash will be removed; if the key exists multiple times in the hash
 * @hs, this function must be called once per key.  The removed object
 * will be returned and ops->hs_put is called on the removed object.
 */
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
	return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);
/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash hs->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs NULL is returned.
 */
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
	void *obj = NULL;
	struct hlist_node *hnode;
	struct cfs_hash_bd bds[2];

	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
	if (hnode)
		obj = cfs_hash_object(hs, hnode);

	cfs_hash_dual_bd_unlock(hs, bds, 0);
	cfs_hash_unlock(hs, 0);

	return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);
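/*
 * Caller-side sketch (illustrative; my_obj and mo_hnode are hypothetical):
 *
 *	struct my_obj *obj = cfs_hash_lookup(hs, &key);
 *
 *	if (obj) {
 *		... use obj; the lookup took a reference via hs_get ...
 *		cfs_hash_put(hs, &obj->mo_hnode);
 *	}
 */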
static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
	LASSERT(!cfs_hash_is_exiting(hs));

	if (!cfs_hash_with_rehash(hs))
		return;
	/*
	 * NB: it's a race on cfs_hash::hs_iterating, but it doesn't matter
	 * because it's just an unreliable signal to the rehash thread,
	 * which will try to finish the rehash ASAP when seeing this.
	 */
	hs->hs_iterating = 1;

	cfs_hash_lock(hs, 1);
	hs->hs_iterators++;

	/* NB: iteration is mostly called by service thread,
	 * we tend to cancel pending rehash-request, instead of
	 * blocking service thread; we will relaunch the rehash request
	 * after iteration
	 */
	if (cfs_hash_is_rehashing(hs))
		cfs_hash_rehash_cancel_locked(hs);
	cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
	int remained;
	int bits;

	if (!cfs_hash_with_rehash(hs))
		return;
	cfs_hash_lock(hs, 1);
	remained = --hs->hs_iterators;
	bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 1);
	/* NB: it's a race on cfs_hash::hs_iterating, see above */
	if (remained == 0)
		hs->hs_iterating = 0;
	if (bits > 0) {
		cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
				    CFS_HASH_LOOP_HOG);
	}
}
/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) the function may sleep!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item by
 *      cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			void *data, int remove_safe)
{
	struct hlist_node *hnode;
	struct hlist_node *pos;
	struct cfs_hash_bd bd;
	__u64 count = 0;
	int excl = !!remove_safe;
	int loop = 0;
	int i;

	cfs_hash_for_each_enter(hs);

	cfs_hash_lock(hs, 0);
	LASSERT(!cfs_hash_is_rehashing(hs));

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, excl);
		if (!func) { /* only glimpse size */
			count += bd.bd_bucket->hsb_count;
			cfs_hash_bd_unlock(hs, &bd, excl);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			hlist_for_each_safe(hnode, pos, hhead) {
				cfs_hash_bucket_validate(hs, &bd, hnode);
				count++;
				loop++;
				if (func(hs, &bd, hnode, data)) {
					cfs_hash_bd_unlock(hs, &bd, excl);
					goto out;
				}
			}
		}
		cfs_hash_bd_unlock(hs, &bd, excl);
		if (loop < CFS_HASH_LOOP_HOG)
			continue;
		loop = 0;
		cfs_hash_unlock(hs, 0);
		cond_resched();
		cfs_hash_lock(hs, 0);
	}
out:
	cfs_hash_unlock(hs, 0);

	cfs_hash_for_each_exit(hs);
	return count;
}
struct cfs_hash_cond_arg {
	cfs_hash_cond_opt_cb_t	func;
	void			*arg;
};

static int
cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 struct hlist_node *hnode, void *data)
{
	struct cfs_hash_cond_arg *cond = data;

	if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
		cfs_hash_bd_del_locked(hs, bd, hnode);
	return 0;
}

/**
 * Delete item from the libcfs hash @hs when @func returns true.
 * The write lock is held during the loop over each bucket to prevent
 * any object from being referenced.
 */
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
	struct cfs_hash_cond_arg arg = {
		.func	= func,
		.arg	= data,
	};

	cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);
void
cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
		  void *data)
{
	cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
		       void *data)
{
	cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);

static int
cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
	      struct hlist_node *hnode, void *data)
{
	*(int *)data = 0;
	return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(struct cfs_hash *hs)
{
	int empty = 1;

	cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
	return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(struct cfs_hash *hs)
{
	return cfs_hash_with_counter(hs) ?
	       atomic_read(&hs->hs_count) :
	       cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
/*
 * cfs_hash_for_each_relax:
 * Iterate the hash table and call @func on each item without
 * any lock.  This function can't guarantee to finish the iteration
 * if these features are enabled:
 *
 * a. if rehash_key is enabled, an item can be moved from
 *    one bucket to another bucket
 * b. the user can remove a non-zero-ref item from the hash-table,
 *    so the item can be removed from the hash-table; even worse,
 *    it's possible that the user changed the key and inserted it into
 *    another hash bucket.
 * There's no way for us to finish the iteration correctly in the
 * previous two cases, so the iteration has to stop on a change.
 */
static int
cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			void *data)
{
	struct hlist_node *hnode;
	struct hlist_node *tmp;
	struct cfs_hash_bd bd;
	__u32 version;
	int count = 0;
	int stop_on_change;
	int rc = 0;
	int i;

	stop_on_change = cfs_hash_with_rehash_key(hs) ||
			 !cfs_hash_with_no_itemref(hs) ||
			 !hs->hs_ops->hs_put_locked;
	cfs_hash_lock(hs, 0);
	LASSERT(!cfs_hash_is_rehashing(hs));

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 0);
		version = cfs_hash_bd_version_get(&bd);

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			for (hnode = hhead->first; hnode;) {
				cfs_hash_bucket_validate(hs, &bd, hnode);
				cfs_hash_get(hs, hnode);
				cfs_hash_bd_unlock(hs, &bd, 0);
				cfs_hash_unlock(hs, 0);

				rc = func(hs, &bd, hnode, data);
				if (stop_on_change)
					cfs_hash_put(hs, hnode);
				cond_resched();
				count++;

				cfs_hash_lock(hs, 0);
				cfs_hash_bd_lock(hs, &bd, 0);
				if (!stop_on_change) {
					tmp = hnode->next;
					cfs_hash_put_locked(hs, hnode);
					hnode = tmp;
				} else { /* bucket changed? */
					if (version !=
					    cfs_hash_bd_version_get(&bd))
						break;
					/* safe to continue because no change */
					hnode = hnode->next;
				}
				if (rc) /* callback wants to break iteration */
					break;
			}
			if (rc) /* callback wants to break iteration */
				break;
		}
		cfs_hash_bd_unlock(hs, &bd, 0);
		if (rc) /* callback wants to break iteration */
			break;
	}
	cfs_hash_unlock(hs, 0);

	return count;
}
int
cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			 void *data)
{
	if (cfs_hash_with_no_lock(hs) ||
	    cfs_hash_with_rehash_key(hs) ||
	    !cfs_hash_with_no_itemref(hs))
		return -EOPNOTSUPP;

	if (!hs->hs_ops->hs_get ||
	    (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
		return -EOPNOTSUPP;

	cfs_hash_for_each_enter(hs);
	cfs_hash_for_each_relax(hs, func, data);
	cfs_hash_for_each_exit(hs);

	return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_nolock);
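/*
 * Caller-side sketch: cfs_hash_for_each_nolock() refuses tables that
 * cannot be iterated safely without the table lock, so a caller would
 * typically fall back to the locked variant (my_cb is hypothetical):
 *
 *	if (cfs_hash_for_each_nolock(hs, my_cb, NULL) == -EOPNOTSUPP)
 *		cfs_hash_for_each(hs, my_cb, NULL);
 */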
/**
 * For each hash bucket in the libcfs hash @hs call the passed callback
 * @func until all the hash buckets are empty.  The passed callback @func
 * or the previously registered callback hs->hs_put must remove the item
 * from the hash.  You may either use the cfs_hash_del() or hlist_del()
 * functions.  No rwlocks will be held during the callback @func, so it
 * is safe to sleep if needed.  This function will not terminate until
 * the hash is empty.  Note it is still possible to concurrently add new
 * items into the hash.  It is the caller's responsibility to ensure
 * the required locking is in place to prevent concurrent insertions.
 */
int
cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			void *data)
{
	unsigned i = 0;

	if (cfs_hash_with_no_lock(hs))
		return -EOPNOTSUPP;

	if (!hs->hs_ops->hs_get ||
	    (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
		return -EOPNOTSUPP;

	cfs_hash_for_each_enter(hs);
	while (cfs_hash_for_each_relax(hs, func, data)) {
		CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
		       hs->hs_name, i++);
	}
	cfs_hash_for_each_exit(hs);
	return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_empty);
void
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
			cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_head *hhead;
	struct hlist_node *hnode;
	struct cfs_hash_bd bd;

	cfs_hash_for_each_enter(hs);
	cfs_hash_lock(hs, 0);
	if (hindex >= CFS_HASH_NHLIST(hs))
		goto out;

	cfs_hash_bd_index_set(hs, hindex, &bd);

	cfs_hash_bd_lock(hs, &bd, 0);
	hhead = cfs_hash_bd_hhead(hs, &bd);
	hlist_for_each(hnode, hhead) {
		if (func(hs, &bd, hnode, data))
			break;
	}
	cfs_hash_bd_unlock(hs, &bd, 0);
out:
	cfs_hash_unlock(hs, 0);
	cfs_hash_for_each_exit(hs);
}
EXPORT_SYMBOL(cfs_hash_hlist_for_each);
/**
 * For each item in the libcfs hash @hs which matches the @key call
 * the passed callback @func and pass to it as an argument each hash
 * item and the private @data.  During the callback the bucket lock
 * is held so the callback must never sleep.
 */
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
		      cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_node *hnode;
	struct cfs_hash_bd bds[2];
	unsigned i;

	cfs_hash_lock(hs, 0);

	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

	cfs_hash_for_each_bd(bds, 2, i) {
		struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);

		hlist_for_each(hnode, hlist) {
			cfs_hash_bucket_validate(hs, &bds[i], hnode);

			if (cfs_hash_keycmp(hs, key, hnode)) {
				if (func(hs, &bds[i], hnode, data))
					break;
			}
		}
	}

	cfs_hash_dual_bd_unlock(hs, bds, 0);
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each_key);
/**
 * Rehash the libcfs hash @hs to the given @bits.  This can be used
 * to grow the hash size when excessive chaining is detected, or to
 * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
 * flag is set in @hs the libcfs hash may be dynamically rehashed
 * during addition or removal if the hash's theta value exceeds
 * either the hs->hs_min_theta or hs->hs_max_theta values.  By default
 * these values are tuned to keep the chained hash depth small, and
 * this approach assumes a reasonably uniform hashing function.  The
 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
 */
void
cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
{
	int i;

	/* need hold cfs_hash_lock(hs, 1) */
	LASSERT(cfs_hash_with_rehash(hs) &&
		!cfs_hash_with_no_lock(hs));

	if (!cfs_hash_is_rehashing(hs))
		return;

	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
		hs->hs_rehash_bits = 0;
		return;
	}

	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
		cfs_hash_unlock(hs, 1);
		/* raise console warning while waiting too long */
		CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
		       "hash %s is still rehashing, rescheduled %d\n",
		       hs->hs_name, i - 1);
		cond_resched();
		cfs_hash_lock(hs, 1);
	}
}
void
cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
	cfs_hash_lock(hs, 1);
	cfs_hash_rehash_cancel_locked(hs);
	cfs_hash_unlock(hs, 1);
}

int
cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
	int rc;

	LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));

	cfs_hash_lock(hs, 1);

	rc = cfs_hash_rehash_bits(hs);
	if (rc <= 0) {
		cfs_hash_unlock(hs, 1);
		return rc;
	}

	hs->hs_rehash_bits = rc;
	if (!do_rehash) {
		/* launch and return */
		cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
		cfs_hash_unlock(hs, 1);
		return 0;
	}

	/* rehash right now */
	cfs_hash_unlock(hs, 1);

	return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
}
static int
cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
{
	struct cfs_hash_bd new;
	struct hlist_head *hhead;
	struct hlist_node *hnode;
	struct hlist_node *pos;
	void *key;
	int c = 0;

	/* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
	cfs_hash_bd_for_each_hlist(hs, old, hhead) {
		hlist_for_each_safe(hnode, pos, hhead) {
			key = cfs_hash_key(hs, hnode);
			LASSERT(key);
			/* Validate hnode is in the correct bucket. */
			cfs_hash_bucket_validate(hs, old, hnode);
			/*
			 * Delete from old hash bucket; move to new bucket.
			 * ops->hs_key must be defined.
			 */
			cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
					     hs->hs_rehash_bits, key, &new);
			cfs_hash_bd_move_locked(hs, old, &new, hnode);
			c++;
		}
	}
	return c;
}
static int
cfs_hash_rehash_worker(struct cfs_workitem *wi)
{
	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
	struct cfs_hash_bucket **bkts;
	struct cfs_hash_bd bd;
	unsigned int old_size;
	unsigned int new_size;
	int bsize;
	int count = 0;
	int rc = 0;
	int i;

	LASSERT(hs && cfs_hash_with_rehash(hs));

	cfs_hash_lock(hs, 0);
	LASSERT(cfs_hash_is_rehashing(hs));

	old_size = CFS_HASH_NBKT(hs);
	new_size = CFS_HASH_RH_NBKT(hs);

	cfs_hash_unlock(hs, 0);

	/*
	 * don't need hs::hs_rwlock for hs::hs_buckets,
	 * because nobody can change the bkt-table except me.
	 */
	bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
					old_size, new_size);
	cfs_hash_lock(hs, 1);
	if (!bkts) {
		rc = -ENOMEM;
		goto out;
	}

	if (bkts == hs->hs_buckets) {
		bkts = NULL; /* do nothing */
		goto out;
	}

	rc = __cfs_hash_theta(hs);
	if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
		/* free the newly allocated bkt-table */
		old_size = new_size;
		new_size = CFS_HASH_NBKT(hs);
		rc = -EALREADY;
		goto out;
	}

	LASSERT(!hs->hs_rehash_buckets);
	hs->hs_rehash_buckets = bkts;

	rc = 0;
	cfs_hash_for_each_bucket(hs, &bd, i) {
		if (cfs_hash_is_exiting(hs)) {
			rc = -ESRCH;
			/* someone wants to destroy the hash, abort now */
			if (old_size < new_size) /* OK to free old bkt-table */
				break;
			/* it's shrinking, need to free the new bkt-table */
			hs->hs_rehash_buckets = NULL;
			old_size = new_size;
			new_size = CFS_HASH_NBKT(hs);
			goto out;
		}

		count += cfs_hash_rehash_bd(hs, &bd);
		if (count < CFS_HASH_LOOP_HOG ||
		    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
			continue;
		}

		count = 0;
		cfs_hash_unlock(hs, 1);
		cond_resched();
		cfs_hash_lock(hs, 1);
	}

	hs->hs_rehash_count++;

	bkts = hs->hs_buckets;
	hs->hs_buckets = hs->hs_rehash_buckets;
	hs->hs_rehash_buckets = NULL;

	hs->hs_cur_bits = hs->hs_rehash_bits;
out:
	hs->hs_rehash_bits = 0;
	if (rc == -ESRCH) /* never be scheduled again */
		cfs_wi_exit(cfs_sched_rehash, wi);
	bsize = cfs_hash_bkt_size(hs);
	cfs_hash_unlock(hs, 1);
	/* can't refer to @hs anymore because it could be destroyed */
	if (bkts)
		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
	if (rc != 0)
		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
	/* return 1 only if cfs_wi_exit is called */
	return rc == -ESRCH;
}
/**
 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
 * @old_key must be provided to locate the object's previous location
 * in the hash, and the @new_key will be used to reinsert the object.
 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
 * combo when it is critical that there is no window in time where the
 * object is missing from the hash.  When an object is being rehashed
 * the registered cfs_hash_get() and cfs_hash_put() functions will
 * be called.
 */
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
			 void *new_key, struct hlist_node *hnode)
{
	struct cfs_hash_bd bds[3];
	struct cfs_hash_bd old_bds[2];
	struct cfs_hash_bd new_bd;

	LASSERT(!hlist_unhashed(hnode));

	cfs_hash_lock(hs, 0);

	cfs_hash_dual_bd_get(hs, old_key, old_bds);
	cfs_hash_bd_get(hs, new_key, &new_bd);

	bds[0] = old_bds[0];
	bds[1] = old_bds[1];
	bds[2] = new_bd;

	/* NB: bds[0] and bds[1] are ordered already */
	cfs_hash_bd_order(&bds[1], &bds[2]);
	cfs_hash_bd_order(&bds[0], &bds[1]);

	cfs_hash_multi_bd_lock(hs, bds, 3, 1);
	if (likely(!old_bds[1].bd_bucket)) {
		cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
	} else {
		cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
		cfs_hash_bd_add_locked(hs, &new_bd, hnode);
	}
	/* overwrite key inside locks, otherwise it may screw up other
	 * operations, i.e: rehash
	 */
	cfs_hash_keycpy(hs, hnode, new_key);

	cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_rehash_key);
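/*
 * Caller-side sketch (illustrative; my_obj fields are hypothetical):
 * atomically re-key an object with no window where it is absent from
 * the table.  The table must be created with CFS_HASH_REHASH_KEY so
 * that ops->hs_keycpy is defined:
 *
 *	cfs_hash_rehash_key(hs, &obj->mo_key, &new_key, &obj->mo_hnode);
 */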
void cfs_hash_debug_header(struct seq_file *m)
{
	seq_printf(m, "%-*s cur min max theta t-min t-max flags rehash count maxdep maxdepb distribution\n",
		   CFS_HASH_BIGNAME_LEN, "name");
}
EXPORT_SYMBOL(cfs_hash_debug_header);

static struct cfs_hash_bucket **
cfs_hash_full_bkts(struct cfs_hash *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (!hs->hs_rehash_buckets)
		return hs->hs_buckets;

	LASSERT(hs->hs_rehash_bits != 0);
	return hs->hs_rehash_bits > hs->hs_cur_bits ?
	       hs->hs_rehash_buckets : hs->hs_buckets;
}

static unsigned int
cfs_hash_full_nbkt(struct cfs_hash *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (!hs->hs_rehash_buckets)
		return CFS_HASH_NBKT(hs);

	LASSERT(hs->hs_rehash_bits != 0);
	return hs->hs_rehash_bits > hs->hs_cur_bits ?
	       CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
}
void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
{
	int dist[8] = { 0, };
	int maxdep = -1;
	int maxdepb = -1;
	int total = 0;
	int theta;
	int i;

	cfs_hash_lock(hs, 0);
	theta = __cfs_hash_theta(hs);

	seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
		   CFS_HASH_BIGNAME_LEN, hs->hs_name,
		   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
		   1 << hs->hs_max_bits,
		   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
		   __cfs_hash_theta_int(hs->hs_min_theta),
		   __cfs_hash_theta_frac(hs->hs_min_theta),
		   __cfs_hash_theta_int(hs->hs_max_theta),
		   __cfs_hash_theta_frac(hs->hs_max_theta),
		   hs->hs_flags, hs->hs_rehash_count);

	/*
	 * The distribution is a summary of the chained hash depth in
	 * each of the libcfs hash buckets.  Each bucket's hsb_count is
	 * divided by the hash theta value and used to generate a
	 * histogram of the hash distribution.  A uniform hash will
	 * result in all hash buckets being close to the average, thus
	 * only the first few entries in the histogram will be non-zero.
	 * If your hash function results in a non-uniform hash, this will
	 * be observable as outlier buckets in the distribution histogram.
	 *
	 * Uniform hash distribution:		128/128/0/0/0/0/0/0
	 * Non-uniform hash distribution:	128/125/0/0/0/0/2/1
	 */
	for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
		struct cfs_hash_bd bd;

		bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
		cfs_hash_bd_lock(hs, &bd, 0);
		if (maxdep < bd.bd_bucket->hsb_depmax) {
			maxdep = bd.bd_bucket->hsb_depmax;
			maxdepb = ffz(~maxdep);
		}
		total += bd.bd_bucket->hsb_count;
		dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
		cfs_hash_bd_unlock(hs, &bd, 0);
	}

	seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
	for (i = 0; i < 8; i++)
		seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');

	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_debug_str);