/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/*
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions; they provide some generic
 * infrastructure for managing object devices.
 *
 * Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/libcfs/libcfs.h>
#include <linux/module.h>
#include <linux/libcfs/libcfs_hash.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <linux/list.h>
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
/**
 * Decrease reference counter on object. If the last reference is released,
 * return the object to the cache unless lu_object_is_dying(o) holds; in the
 * latter case, free the object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *top;
	struct lu_site *site;
	struct lu_object *orig;
	struct cfs_hash_bd bd;
	const struct lu_fid *fid;

	top  = o->lo_header;
	site = o->lo_dev->ld_site;
	orig = o;

	/*
	 * Till we have fids-on-OST fully implemented, anonymous objects
	 * are possible in OSP. Such an object isn't listed in the site,
	 * so we should not remove it from the site.
	 */
	fid = lu_object_fid(o);
	if (fid_is_zero(fid)) {
		LASSERT(top->loh_hash.next == NULL
			&& top->loh_hash.pprev == NULL);
		LASSERT(list_empty(&top->loh_lru));
		if (!atomic_dec_and_test(&top->loh_ref))
			return;
		list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
			if (o->lo_ops->loo_object_release != NULL)
				o->lo_ops->loo_object_release(env, o);
		}
		lu_object_free(env, orig);
		return;
	}

	cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
	bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

	if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
		if (lu_object_is_dying(top)) {
			/*
			 * Somebody may be waiting for this; currently this is
			 * only used for cl_object, see cl_object_put_last().
			 */
			wake_up_all(&bkt->lsb_marche_funebre);
		}
		return;
	}

	LASSERT(bkt->lsb_busy > 0);
	bkt->lsb_busy--;
	/*
	 * When the last reference is released, iterate over object
	 * layers, and notify them that the object is no longer busy.
	 */
	list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_release != NULL)
			o->lo_ops->loo_object_release(env, o);
	}

	if (!lu_object_is_dying(top)) {
		LASSERT(list_empty(&top->loh_lru));
		list_add_tail(&top->loh_lru, &bkt->lsb_lru);
		cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
		return;
	}

	/*
	 * If the object is dying (will not be cached), remove it
	 * from the hash table and LRU.
	 *
	 * This is done with hash table and LRU lists locked. As the only
	 * way to acquire the first reference to a previously unreferenced
	 * object is through hash-table lookup (lu_object_find()) or LRU
	 * scanning (lu_site_purge()), both done under the hash-table and
	 * LRU lock, no race with a concurrent object lookup is possible
	 * and we can safely destroy the object below.
	 */
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
		cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
	cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
	/*
	 * The object was already removed from the hash and LRU above;
	 * we can kill it now.
	 */
	lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
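/*
 * Illustrative sketch (added commentary, not in the original file): the
 * reference obtained from a cache lookup is dropped with lu_object_put().
 * A typical caller, with env/dev/fid assumed to exist, looks like:
 *
 *	struct lu_object *o = lu_object_find(env, dev, fid, NULL);
 *
 *	if (!IS_ERR(o)) {
 *		... use the object ...
 *		lu_object_put(env, o);
 *	}
 */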
/**
 * Put object and don't keep it in the cache. This is a temporary solution
 * for multi-site objects whose layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
	set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
	lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);
/**
 * Kill the object and take it out of the LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
		struct cfs_hash *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
		struct cfs_hash_bd bd;

		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
		list_del_init(&top->loh_lru);
		cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
		cfs_hash_bd_unlock(obj_hash, &bd, 1);
	}
}
EXPORT_SYMBOL(lu_object_unhash);
/**
 * Allocate new object.
 *
 * This follows the object creation protocol, described in the comment within
 * the struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
					 struct lu_device *dev,
					 const struct lu_fid *f,
					 const struct lu_object_conf *conf)
{
	struct lu_object *scan;
	struct lu_object *top;
	struct list_head *layers;
	unsigned int init_mask = 0;
	unsigned int init_flag;
	int clean;
	int result;

	/*
	 * Create top-level object slice. This will also create
	 * lu_object_header.
	 */
	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
	if (top == NULL)
		return ERR_PTR(-ENOMEM);
	if (IS_ERR(top))
		return top;
	/*
	 * This is the only place where object fid is assigned. It's constant
	 * after this point.
	 */
	top->lo_header->loh_fid = *f;
	layers = &top->lo_header->loh_layers;
	do {
		/*
		 * Call ->loo_object_init() repeatedly, until no more new
		 * object slices are created.
		 */
		clean = 1;
		init_flag = 1;
		list_for_each_entry(scan, layers, lo_linkage) {
			if (init_mask & init_flag)
				goto next;
			clean = 0;
			scan->lo_header = top->lo_header;
			result = scan->lo_ops->loo_object_init(env, scan, conf);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
			init_mask |= init_flag;
next:
			init_flag <<= 1;
		}
	} while (!clean);

	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_start != NULL) {
			result = scan->lo_ops->loo_object_start(env, scan);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
		}
	}

	lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
	return top;
}
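/*
 * Illustrative sketch (added commentary, not in the original file): for a
 * device stack A on top of B on top of C, the function above works roughly
 * as follows:
 *
 *	A->ldo_object_alloc() creates the top slice (and the header);
 *	each slice's ->loo_object_init() may allocate the slice below it,
 *	so the do/while loop re-scans loh_layers until no new slices
 *	appear (init_mask tracks which slices are already initialized);
 *	->loo_object_start() is then called bottom-to-top on the
 *	completed stack.
 */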
/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_site *site;
	struct lu_object *scan;
	struct list_head *layers;
	struct list_head splice;

	site   = o->lo_dev->ld_site;
	layers = &o->lo_header->loh_layers;
	bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);

	/*
	 * First call ->loo_object_delete() method to release all resources.
	 */
	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_delete != NULL)
			scan->lo_ops->loo_object_delete(env, scan);
	}

	/*
	 * Then, splice object layers into stand-alone list, and call
	 * ->loo_object_free() on all layers to free memory. Splice is
	 * necessary, because lu_object_header is freed together with the
	 * top-level slice.
	 */
	INIT_LIST_HEAD(&splice);
	list_splice_init(layers, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Free layers in bottom-to-top order, so that object header
		 * lives as long as possible and ->loo_object_free() methods
		 * can look at its contents.
		 */
		o = container_of0(splice.prev, struct lu_object, lo_linkage);
		list_del_init(&o->lo_linkage);
		LASSERT(o->lo_ops->loo_object_free != NULL);
		o->lo_ops->loo_object_free(env, o);
	}

	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
	struct lu_object_header *h;
	struct lu_object_header *temp;
	struct lu_site_bkt_data *bkt;
	struct cfs_hash_bd bd;
	struct cfs_hash_bd bd2;
	struct list_head dispose;
	int did_sth;
	int start;
	int count;
	int bnr;
	int i;

	if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
		return 0;

	INIT_LIST_HEAD(&dispose);
	/*
	 * Under LRU list lock, scan LRU list and move unreferenced objects to
	 * the dispose list, removing them from LRU and hash table.
	 */
	start = s->ls_purge_start;
	bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
	did_sth = 0;
	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		if (i < start)
			continue;
		count = bnr;
		cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

		list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
			LASSERT(atomic_read(&h->loh_ref) == 0);

			cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
			LASSERT(bd.bd_bucket == bd2.bd_bucket);

			cfs_hash_bd_del_locked(s->ls_obj_hash,
					       &bd2, &h->loh_hash);
			list_move(&h->loh_lru, &dispose);
			did_sth = 1;

			if (nr != ~0 && --nr == 0)
				break;

			if (count > 0 && --count == 0)
				break;
		}
		cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
		cond_resched();
		/*
		 * Free everything on the dispose list. This is safe against
		 * races due to the reasons described in lu_object_put().
		 */
		while (!list_empty(&dispose)) {
			h = container_of0(dispose.next,
					  struct lu_object_header, loh_lru);
			list_del_init(&h->loh_lru);
			lu_object_free(env, lu_object_top(h));
			lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
		}

		if (nr == 0)
			break;
	}

	if (nr != 0 && did_sth && start != 0) {
		start = 0; /* restart from the first bucket */
		goto again;
	}
	/* race on s->ls_purge_start, but nobody cares */
	s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

	return nr;
}
EXPORT_SYMBOL(lu_site_purge);
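/*
 * Illustrative sketch (added commentary, not in the original file): passing
 * ~0 requests purging of every unreferenced object, which is how
 * lu_stack_fini() below drains a site before tearing down the device stack:
 *
 *	lu_site_purge(env, site, ~0);
 */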
/*
 * Code below has to jump through certain hoops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into the libcfs_debug_msg() interface, which assumes that each
 * message supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of the lu_cdebug_key key), until a terminating
 * newline character is detected.
 *
 * XXX overflow is not handled correctly.
 */

enum {
	/**
	 * Maximal line size.
	 */
	LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
	/**
	 * Temporary buffer.
	 */
	char lck_area[LU_CDEBUG_LINE];
};
/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
	.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
		    LCT_MG_THREAD | LCT_CL_THREAD | LCT_LOCAL,
	.lct_init = lu_global_key_init,
	.lct_fini = lu_global_key_fini
};
/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...)
{
	struct libcfs_debug_msg_data *msgdata = cookie;
	struct lu_cdebug_data *key;
	int used;
	int complete;
	va_list args;

	va_start(args, format);

	key = lu_context_key_get(&env->le_ctx, &lu_global_key);
	LASSERT(key != NULL);

	used = strlen(key->lck_area);
	complete = format[strlen(format) - 1] == '\n';
	/*
	 * Append new chunk to the buffer.
	 */
	vsnprintf(key->lck_area + used,
		  ARRAY_SIZE(key->lck_area) - used, format, args);
	if (complete) {
		if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
			libcfs_debug_msg(msgdata, "%s", key->lck_area);
		key->lck_area[0] = 0;
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
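/*
 * Illustrative sketch (not part of the original file; the msgdata setup is
 * an assumption based on the libcfs_debug_msg_data cookie expected above):
 *
 *	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_OTHER, NULL);
 *
 *	lu_object_print(env, &msgdata, lu_cdebug_printer, o);
 */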
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr)
{
	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
		   PFID(&hdr->loh_fid),
		   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
		   list_empty((struct list_head *)&hdr->loh_lru) ? "" : " lru",
		   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o)
{
	static const char ruler[] = "........................................";
	struct lu_object_header *top;
	int depth = 4;

	top = o->lo_header;
	lu_object_header_print(env, cookie, printer, top);
	(*printer)(env, cookie, "{\n");

	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		/*
		 * print `.' \a depth times followed by type name and address
		 */
		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
			   o->lo_dev->ld_type->ldt_name, o);

		if (o->lo_ops->loo_object_print != NULL)
			(*o->lo_ops->loo_object_print)(env, cookie, printer, o);

		(*printer)(env, cookie, "\n");
	}

	(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_invariant != NULL &&
		    !o->lo_ops->loo_object_invariant(o))
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
				       struct cfs_hash_bd *bd,
				       const struct lu_fid *f,
				       wait_queue_t *waiter,
				       __u64 *version)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *h;
	struct hlist_node *hnode;
	__u64 ver = cfs_hash_bd_version_get(bd);

	if (*version == ver)
		return ERR_PTR(-ENOENT);

	*version = ver;
	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
	/* cfs_hash_bd_peek_locked is a somewhat "internal" function
	 * of cfs_hash, it doesn't add a refcount on the object. */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (hnode == NULL) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return ERR_PTR(-ENOENT);
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (likely(!lu_object_is_dying(h))) {
		cfs_hash_get(s->ls_obj_hash, hnode);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
		list_del_init(&h->loh_lru);
		return lu_object_top(h);
	}

	/*
	 * Lookup found an object being destroyed; this object cannot be
	 * returned (to assure that references to dying objects are eventually
	 * drained), and moreover, lookup has to wait until the object is
	 * freed.
	 */
	init_waitqueue_entry(waiter, current);
	add_wait_queue(&bkt->lsb_marche_funebre, waiter);
	set_current_state(TASK_UNINTERRUPTIBLE);
	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
	return ERR_PTR(-EAGAIN);
}
/**
 * Search cache for an object with the fid \a f. If such an object is found,
 * return it. Otherwise, create a new object, insert it into the cache and
 * return it. In any case, an additional reference is acquired on the
 * returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
				 struct lu_device *dev, const struct lu_fid *f,
				 const struct lu_object_conf *conf)
{
	return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
static struct lu_object *lu_object_new(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *o;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	struct lu_site_bkt_data *bkt;

	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	hs = dev->ld_site->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
	return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *f,
					    const struct lu_object_conf *conf,
					    wait_queue_t *waiter)
{
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	__u64 version = 0;

	/*
	 * This uses standard index maintenance protocol:
	 *
	 *     - search index under lock, and return object if found;
	 *     - otherwise, unlock index, allocate new object;
	 *     - lock index and search again;
	 *     - if nothing is found (usual case), insert newly created
	 *       object into index;
	 *     - otherwise (race: other thread inserted object), free
	 *       object just allocated;
	 *     - unlock index;
	 *     - return object.
	 *
	 * For the "LOC_F_NEW" case, we are sure the object is newly created.
	 * It is unnecessary to perform lookup-alloc-lookup-insert; instead,
	 * just alloc and insert directly.
	 *
	 * If a dying object is found during index search, add @waiter to the
	 * site wait-queue and return ERR_PTR(-EAGAIN).
	 */
	if (conf != NULL && conf->loc_flags & LOC_F_NEW)
		return lu_object_new(env, dev, f, conf);

	s  = dev->ld_site;
	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	o = htable_lookup(s, &bd, f, waiter, &version);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
		return o;

	/*
	 * Allocate new object. This may result in rather complicated
	 * operations, including fld queries, inode loading, etc.
	 */
	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	LASSERT(lu_fid_eq(lu_object_fid(o), f));

	cfs_hash_bd_lock(hs, &bd, 1);

	shadow = htable_lookup(s, &bd, f, waiter, &version);
	if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
		struct lu_site_bkt_data *bkt;

		bkt = cfs_hash_bd_extra_get(hs, &bd);
		cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
		bkt->lsb_busy++;
		cfs_hash_bd_unlock(hs, &bd, 1);
		return o;
	}

	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
	cfs_hash_bd_unlock(hs, &bd, 1);
	lu_object_free(env, o);
	return shadow;
}
/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object *obj;
	wait_queue_t wait;

	while (1) {
		obj = lu_object_find_try(env, dev, f, conf, &wait);
		if (obj != ERR_PTR(-EAGAIN))
			return obj;
		/*
		 * lu_object_find_try() already added waiter into the
		 * wait queue.
		 */
		waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
		bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
		remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
	}
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *top;
	struct lu_object *obj;

	top = lu_object_find(env, dev, f, conf);
	if (!IS_ERR(top)) {
		obj = lu_object_locate(top->lo_header, dev->ld_type);
		if (obj == NULL)
			lu_object_put(env, top);
	} else {
		obj = top;
	}
	return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
/**
 * Global list of all device types.
 */
static LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
	int result = 0;

	INIT_LIST_HEAD(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_init)
		result = ldt->ldt_ops->ldto_init(ldt);
	if (result == 0)
		list_add(&ldt->ldt_linkage, &lu_device_types);
	return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
	list_del_init(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_fini)
		ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
	struct lu_device_type *ldt;

	list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
		if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
			ldt->ldt_ops->ldto_stop(ldt);
	}
}
EXPORT_SYMBOL(lu_types_stop);
/**
 * Global list of all sites on this node
 */
static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
	struct lu_env *lsp_env;
	void *lsp_cookie;
	lu_printer_t lsp_printer;
};
static int
lu_site_obj_print(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		  struct hlist_node *hnode, void *data)
{
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (!list_empty(&h->loh_layers)) {
		const struct lu_object *o;

		o = lu_object_top(h);
		lu_object_print(arg->lsp_env, arg->lsp_cookie,
				arg->lsp_printer, o);
	} else {
		lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
				       arg->lsp_printer, h);
	}
	return 0;
}
/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
		   lu_printer_t printer)
{
	struct lu_site_print_arg arg = {
		.lsp_env     = (struct lu_env *)env,
		.lsp_cookie  = cookie,
		.lsp_printer = printer,
	};

	cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);
enum {
	LU_CACHE_PERCENT_MAX	 = 50,
	LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
module_param(lu_cache_percent, int, 0644);
MODULE_PARM_DESC(lu_cache_percent, "Percentage of memory to be used as lu_object cache");
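/*
 * Illustrative note (not part of the original file; the module name is an
 * assumption): since lu_cache_percent is a 0644 module parameter, it can be
 * set at load time, e.g. "modprobe obdclass lu_cache_percent=30", or
 * adjusted at runtime through
 * /sys/module/obdclass/parameters/lu_cache_percent.
 */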
/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
	unsigned long cache_size;
	int bits;

	/*
	 * Calculate hash table size, assuming that we want reasonable
	 * performance when 20% of total memory is occupied by cache of
	 * lu_objects.
	 *
	 * Size of lu_object is (arbitrary) taken as 1K (together with inode).
	 */
	cache_size = totalram_pages;

#if BITS_PER_LONG == 32
	/* limit hashtable size for lowmem systems to low RAM */
	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
		cache_size = (1 << (30 - PAGE_CACHE_SHIFT)) * 3 / 4;
#endif

	/* reset an unreasonable cache setting. */
	if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
		CWARN("obdclass: invalid lu_cache_percent: %u, it must be in the range of (0, %u]. Will use default value: %u.\n",
		      lu_cache_percent, LU_CACHE_PERCENT_MAX,
		      LU_CACHE_PERCENT_DEFAULT);

		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
	}
	cache_size = cache_size / 100 * lu_cache_percent *
		(PAGE_CACHE_SIZE / 1024);

	for (bits = 1; (1 << bits) < cache_size; ++bits) {
		;
	}
	return bits;
}
static unsigned lu_obj_hop_hash(struct cfs_hash *hs,
				const void *key, unsigned mask)
{
	struct lu_fid *fid = (struct lu_fid *)key;
	__u32 hash;

	hash = fid_flatten32(fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	hash = hash_long(hash, hs->hs_bkt_bits);

	/* give me another random factor */
	hash -= hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}
static void *lu_obj_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}
static void lu_obj_hop_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (atomic_add_return(1, &h->loh_ref) == 1) {
		struct lu_site_bkt_data *bkt;
		struct cfs_hash_bd bd;

		cfs_hash_bd_get(hs, &h->loh_fid, &bd);
		bkt = cfs_hash_bd_extra_get(hs, &bd);
		bkt->lsb_busy++;
	}
}

static void lu_obj_hop_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
	LBUG(); /* we should never call it */
}
cfs_hash_ops_t lu_site_hash_ops = {
	.hs_hash	= lu_obj_hop_hash,
	.hs_key		= lu_obj_hop_key,
	.hs_keycmp	= lu_obj_hop_keycmp,
	.hs_object	= lu_obj_hop_object,
	.hs_get		= lu_obj_hop_get,
	.hs_put_locked	= lu_obj_hop_put_locked,
};
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	if (list_empty(&d->ld_linkage))
		list_add(&d->ld_linkage, &s->ls_ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	list_del_init(&d->ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);
/**
 * Initialize site \a s, with \a top as the top level device.
 */
#define LU_SITE_BITS_MIN    12
#define LU_SITE_BITS_MAX    24
/**
 * 256 buckets in total. We don't want too many buckets because:
 * - they consume too much memory;
 * - too many buckets make the per-bucket LRU lists unbalanced.
 */
#define LU_SITE_BKT_BITS    8
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
	struct lu_site_bkt_data *bkt;
	struct cfs_hash_bd bd;
	char name[16];
	int bits;
	int i;

	memset(s, 0, sizeof(*s));
	bits = lu_htable_order();
	snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
	for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
	     bits >= LU_SITE_BITS_MIN; bits--) {
		s->ls_obj_hash = cfs_hash_create(name, bits, bits,
						 bits - LU_SITE_BKT_BITS,
						 sizeof(*bkt), 0, 0,
						 &lu_site_hash_ops,
						 CFS_HASH_SPIN_BKTLOCK |
						 CFS_HASH_NO_ITEMREF |
						 CFS_HASH_DEPTH |
						 CFS_HASH_ASSERT_EMPTY);
		if (s->ls_obj_hash != NULL)
			break;
	}

	if (s->ls_obj_hash == NULL) {
		CERROR("failed to create lu_site hash with bits: %d\n", bits);
		return -ENOMEM;
	}

	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
		INIT_LIST_HEAD(&bkt->lsb_lru);
		init_waitqueue_head(&bkt->lsb_marche_funebre);
	}

	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
	if (s->ls_stats == NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
		return -ENOMEM;
	}

	lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
			     0, "created", "created");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
			     0, "cache_hit", "cache_hit");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
			     0, "cache_miss", "cache_miss");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
			     0, "cache_race", "cache_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
			     0, "cache_death_race", "cache_death_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
			     0, "lru_purged", "lru_purged");

	INIT_LIST_HEAD(&s->ls_linkage);
	s->ls_top_dev = top;
	top->ld_site = s;
	lu_device_get(top);
	lu_ref_add(&top->ld_reference, "site-top", s);

	INIT_LIST_HEAD(&s->ls_ld_linkage);
	spin_lock_init(&s->ls_ld_lock);

	lu_dev_add_linkage(s, top);

	return 0;
}
EXPORT_SYMBOL(lu_site_init);
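/*
 * Illustrative sketch (added commentary, not in the original file): a site
 * is typically brought up and torn down around a device stack, roughly:
 *
 *	err = lu_site_init(s, top_dev);		// build hash + stats
 *	if (err == 0)
 *		err = lu_site_init_finish(s);	// link into global lu_sites
 *	...
 *	lu_site_fini(s);			// unlink and release
 */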
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
	mutex_lock(&lu_sites_guard);
	list_del_init(&s->ls_linkage);
	mutex_unlock(&lu_sites_guard);

	if (s->ls_obj_hash != NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
	}

	if (s->ls_top_dev != NULL) {
		s->ls_top_dev->ld_site = NULL;
		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
		lu_device_put(s->ls_top_dev);
		s->ls_top_dev = NULL;
	}

	if (s->ls_stats != NULL)
		lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);
/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
	int result;

	mutex_lock(&lu_sites_guard);
	result = lu_context_refill(&lu_shrink_env.le_ctx);
	if (result == 0)
		list_add(&s->ls_linkage, &lu_sites);
	mutex_unlock(&lu_sites_guard);
	return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
/**
 * Acquire additional reference on device \a d.
 */
void lu_device_get(struct lu_device *d)
{
	atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
	LASSERT(atomic_read(&d->ld_ref) > 0);
	atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);
/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
	if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
		t->ldt_ops->ldto_start(t);
	memset(d, 0, sizeof(*d));
	atomic_set(&d->ld_ref, 0);
	d->ld_type = t;
	lu_ref_init(&d->ld_reference);
	INIT_LIST_HEAD(&d->ld_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_device_init);
/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
	struct lu_device_type *t;

	t = d->ld_type;
	if (d->ld_obd != NULL) {
		d->ld_obd->obd_lu_dev = NULL;
		d->ld_obd = NULL;
	}

	lu_ref_fini(&d->ld_reference);
	LASSERTF(atomic_read(&d->ld_ref) == 0,
		 "Refcount is %u\n", atomic_read(&d->ld_ref));
	LASSERT(t->ldt_device_nr > 0);
	if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
		t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o, struct lu_object_header *h,
		   struct lu_device *d)
{
	memset(o, 0, sizeof(*o));
	o->lo_header = h;
	o->lo_dev = d;
	lu_device_get(d);
	lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
	INIT_LIST_HEAD(&o->lo_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_object_init);
/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
	struct lu_device *dev = o->lo_dev;

	LASSERT(list_empty(&o->lo_linkage));

	if (dev != NULL) {
		lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
			      "lu_object", o);
		lu_device_put(dev);
		o->lo_dev = NULL;
	}
}
EXPORT_SYMBOL(lu_object_fini);
/**
 * Add object \a o as first layer of compound object \a h.
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
	list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
	list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);
/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
	memset(h, 0, sizeof(*h));
	atomic_set(&h->loh_ref, 1);
	INIT_HLIST_NODE(&h->loh_hash);
	INIT_LIST_HEAD(&h->loh_lru);
	INIT_LIST_HEAD(&h->loh_layers);
	lu_ref_init(&h->loh_reference);
	return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
	LASSERT(list_empty(&h->loh_layers));
	LASSERT(list_empty(&h->loh_lru));
	LASSERT(hlist_unhashed(&h->loh_hash));
	lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype)
{
	struct lu_object *o;

	list_for_each_entry(o, &h->loh_layers, lo_linkage) {
		if (o->lo_dev->ld_type == dtype)
			return o;
	}
	return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
	struct lu_site *site = top->ld_site;
	struct lu_device *scan;
	struct lu_device *next;

	lu_site_purge(env, site, ~0);
	for (scan = top; scan != NULL; scan = next) {
		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
		lu_device_put(scan);
	}

	/* purge again. */
	lu_site_purge(env, site, ~0);

	for (scan = top; scan != NULL; scan = next) {
		const struct lu_device_type *ldt = scan->ld_type;
		struct obd_type *type;

		next = ldt->ldt_ops->ldto_device_free(env, scan);
		type = ldt->ldt_obd_type;
		if (type != NULL) {
			type->typ_refcnt--;
			class_put_type(type);
		}
	}
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
	/**
	 * Maximal number of tld slots.
	 */
	LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;
/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
	int result;
	int i;

	LASSERT(key->lct_init != NULL);
	LASSERT(key->lct_fini != NULL);
	LASSERT(key->lct_tags != 0);

	result = -ENFILE;
	spin_lock(&lu_keys_guard);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		if (lu_keys[i] == NULL) {
			key->lct_index = i;
			atomic_set(&key->lct_used, 1);
			lu_keys[i] = key;
			lu_ref_init(&key->lct_reference);
			result = 0;
			++key_set_version;
			break;
		}
	}
	spin_unlock(&lu_keys_guard);
	return result;
}
EXPORT_SYMBOL(lu_context_key_register);
static void key_fini(struct lu_context *ctx, int index)
{
	if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
		struct lu_context_key *key;

		key = lu_keys[index];
		LASSERT(key != NULL);
		LASSERT(key->lct_fini != NULL);
		LASSERT(atomic_read(&key->lct_used) > 1);

		key->lct_fini(ctx, key, ctx->lc_value[index]);
		lu_ref_del(&key->lct_reference, "ctx", ctx);
		atomic_dec(&key->lct_used);

		if ((ctx->lc_tags & LCT_NOREF) == 0) {
#ifdef CONFIG_MODULE_UNLOAD
			LINVRNT(module_refcount(key->lct_owner) > 0);
#endif
			module_put(key->lct_owner);
		}
		ctx->lc_value[index] = NULL;
	}
}
/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
	LASSERT(atomic_read(&key->lct_used) >= 1);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

	lu_context_key_quiesce(key);

	++key_set_version;
	spin_lock(&lu_keys_guard);
	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
	if (lu_keys[key->lct_index]) {
		lu_keys[key->lct_index] = NULL;
		lu_ref_fini(&key->lct_reference);
	}
	spin_unlock(&lu_keys_guard);

	LASSERTF(atomic_read(&key->lct_used) == 1,
		 "key has instances: %d\n",
		 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
	struct lu_context_key *key = k;
	va_list args;
	int result;

	va_start(args, k);
	do {
		result = lu_context_key_register(key);
		if (result)
			break;
		key = va_arg(args, struct lu_context_key *);
	} while (key != NULL);
	va_end(args);

	if (result != 0) {
		va_start(args, k);
		while (k != key) {
			lu_context_key_degister(k);
			k = va_arg(args, struct lu_context_key *);
		}
		va_end(args);
	}

	return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
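/*
 * Illustrative sketch (not part of the original file; the "foo" names are
 * hypothetical): the usual pattern pairs LU_KEY_INIT_FINI() with a
 * lu_context_key and the *_many() helpers, mirroring lu_global_key above.
 * Note the NULL sentinel terminating the key list:
 *
 *	LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *
 *	static struct lu_context_key foo_thread_key = {
 *		.lct_tags = LCT_MD_THREAD,
 *		.lct_init = foo_key_init,
 *		.lct_fini = foo_key_fini
 *	};
 *
 *	rc = lu_context_key_register_many(&foo_thread_key, NULL);
 *	...
 *	lu_context_key_degister_many(&foo_thread_key, NULL);
 *
 * Per-context values are later retrieved with
 * lu_context_key_get(&env->le_ctx, &foo_thread_key).
 */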
/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_degister(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);
/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_revive(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);

/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_quiesce(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key)
{
	LINVRNT(ctx->lc_state == LCS_ENTERED);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
	LASSERT(lu_keys[key->lct_index] == key);
	return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
/**
 * List of remembered contexts. XXX document me.
 */
static LIST_HEAD(lu_context_remembered);
/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
	struct lu_context *ctx;

	if (!(key->lct_tags & LCT_QUIESCENT)) {
		/*
		 * XXX layering violation.
		 */
		key->lct_tags |= LCT_QUIESCENT;
		/*
		 * XXX memory barrier has to go here.
		 */
		spin_lock(&lu_keys_guard);
		list_for_each_entry(ctx, &lu_context_remembered,
				    lc_remember)
			key_fini(ctx, key->lct_index);
		spin_unlock(&lu_keys_guard);
		++key_set_version;
	}
}
EXPORT_SYMBOL(lu_context_key_quiesce);
void lu_context_key_revive(struct lu_context_key *key)
{
	key->lct_tags &= ~LCT_QUIESCENT;
	++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
	int i;

	if (ctx->lc_value == NULL)
		return;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
		key_fini(ctx, i);

	OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof(ctx->lc_value[0]));
	ctx->lc_value = NULL;
}
static int keys_fill(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_value != NULL);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (ctx->lc_value[i] == NULL && key != NULL &&
		    (key->lct_tags & ctx->lc_tags) &&
		    /*
		     * Don't create values for a LCT_QUIESCENT key, as this
		     * will pin module owning a key.
		     */
		    !(key->lct_tags & LCT_QUIESCENT)) {
			void *value;

			LINVRNT(key->lct_init != NULL);
			LINVRNT(key->lct_index == i);

			value = key->lct_init(ctx, key);
			if (unlikely(IS_ERR(value)))
				return PTR_ERR(value);

			if (!(ctx->lc_tags & LCT_NOREF))
				try_module_get(key->lct_owner);
			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
			atomic_inc(&key->lct_used);
			/*
			 * This is the only place in the code, where an
			 * element of ctx->lc_value[] array is set to non-NULL
			 * value.
			 */
			ctx->lc_value[i] = value;
			if (key->lct_exit != NULL)
				ctx->lc_tags |= LCT_HAS_EXIT;
		}
		ctx->lc_version = key_set_version;
	}
	return 0;
}
static int keys_init(struct lu_context *ctx)
{
	OBD_ALLOC(ctx->lc_value,
		  ARRAY_SIZE(lu_keys) * sizeof(ctx->lc_value[0]));
	if (likely(ctx->lc_value != NULL))
		return keys_fill(ctx);

	return -ENOMEM;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
	int rc;

	memset(ctx, 0, sizeof(*ctx));
	ctx->lc_state = LCS_INITIALIZED;
	ctx->lc_tags = tags;
	if (tags & LCT_REMEMBER) {
		spin_lock(&lu_keys_guard);
		list_add(&ctx->lc_remember, &lu_context_remembered);
		spin_unlock(&lu_keys_guard);
	} else {
		INIT_LIST_HEAD(&ctx->lc_remember);
	}

	rc = keys_init(ctx);
	if (rc != 0)
		lu_context_fini(ctx);

	return rc;
}
EXPORT_SYMBOL(lu_context_init);
/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_FINALIZED;

	if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
		LASSERT(list_empty(&ctx->lc_remember));
		keys_fini(ctx);

	} else { /* could race with key degister */
		spin_lock(&lu_keys_guard);
		keys_fini(ctx);
		list_del_init(&ctx->lc_remember);
		spin_unlock(&lu_keys_guard);
	}
}
EXPORT_SYMBOL(lu_context_fini);
/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);
/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_state == LCS_ENTERED);
	ctx->lc_state = LCS_LEFT;
	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
			if (ctx->lc_value[i] != NULL) {
				struct lu_context_key *key;

				key = lu_keys[i];
				LASSERT(key != NULL);
				if (key->lct_exit != NULL)
					key->lct_exit(ctx,
						      key, ctx->lc_value[i]);
			}
		}
	}
}
EXPORT_SYMBOL(lu_context_exit);
/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
	return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of obd
 * devices being added. Currently, this is only used on the client side,
 * specifically for the echo device client. For other stacks (like ptlrpc
 * threads), contexts are predefined when the lu_device type is registered,
 * during the module probe phase.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);
int lu_env_init(struct lu_env *env, __u32 tags)
{
	int result;

	env->le_ses = NULL;
	result = lu_context_init(&env->le_ctx, tags);
	if (likely(result == 0))
		lu_context_enter(&env->le_ctx);
	return result;
}
EXPORT_SYMBOL(lu_env_init);

void lu_env_fini(struct lu_env *env)
{
	lu_context_exit(&env->le_ctx);
	lu_context_fini(&env->le_ctx);
	env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
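/*
 * Illustrative sketch (added commentary, not in the original file):
 * lu_env_init() and lu_env_fini() bracket most uses of the lu_* API, e.g.:
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_LOCAL);
 *	if (rc == 0) {
 *		... call lu_object_find()/lu_object_put() with &env ...
 *		lu_env_fini(&env);
 *	}
 */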
int lu_env_refill(struct lu_env *env)
{
	int result;

	result = lu_context_refill(&env->le_ctx);
	if (result == 0 && env->le_ses != NULL)
		result = lu_context_refill(env->le_ses);
	return result;
}
EXPORT_SYMBOL(lu_env_refill);
/**
 * Currently, this API is only used by the echo client, because the echo
 * client and the normal Lustre client share the same cl_env cache. The echo
 * client therefore needs to refresh the env context after it gets one from
 * the cache, especially when a normal client and an echo client co-exist on
 * the same node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
			  __u32 stags)
{
	int result;

	if ((env->le_ctx.lc_tags & ctags) != ctags) {
		env->le_ctx.lc_version = 0;
		env->le_ctx.lc_tags |= ctags;
	}

	if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
		env->le_ses->lc_version = 0;
		env->le_ses->lc_tags |= stags;
	}

	result = lu_env_refill(env);

	return result;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
typedef struct lu_site_stats {
	unsigned	lss_populated;
	unsigned	lss_max_search;
	unsigned	lss_total;
	unsigned	lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(struct cfs_hash *hs,
			      lu_site_stats_t *stats, int populated)
{
	struct cfs_hash_bd bd;
	int i;

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 1);
		stats->lss_busy  += bkt->lsb_busy;
		stats->lss_total += cfs_hash_bd_count_get(&bd);
		stats->lss_max_search = max((int)stats->lss_max_search,
					    cfs_hash_bd_depmax_get(&bd));
		if (!populated) {
			cfs_hash_bd_unlock(hs, &bd, 1);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			if (!hlist_empty(hhead))
				stats->lss_populated++;
		}
		cfs_hash_bd_unlock(hs, &bd, 1);
	}
}
/*
 * There exists a potential lock inversion deadlock scenario when using
 * Lustre on top of ZFS. This occurs between one of ZFS's
 * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
 * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
 * while thread B will take the ht_lock and sleep on the lu_sites_guard
 * lock. Obviously neither thread will wake and drop their respective hold
 * on their lock.
 *
 * To prevent this from happening we must ensure the lu_sites_guard lock is
 * not taken while down this code path. ZFS reliably does not set the
 * __GFP_FS bit in its code paths, so this can be used to determine if it
 * is safe to take the lu_sites_guard lock.
 *
 * Ideally we should accurately return the remaining number of cached
 * objects without taking the lu_sites_guard lock, but this is not
 * possible in the current implementation.
 */
static unsigned long lu_cache_shrink_count(struct shrinker *sk,
					   struct shrink_control *sc)
{
	lu_site_stats_t stats;
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long cached = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return 0;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		memset(&stats, 0, sizeof(stats));
		lu_site_stats_get(s->ls_obj_hash, &stats, 0);
		cached += stats.lss_total - stats.lss_busy;
	}
	mutex_unlock(&lu_sites_guard);

	cached = (cached / 100) * sysctl_vfs_cache_pressure;
	CDEBUG(D_INODE, "%ld objects cached\n", cached);
	return cached;
}
static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
					  struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long remain = sc->nr_to_scan, freed = 0;
	LIST_HEAD(splice);

	if (!(sc->gfp_mask & __GFP_FS))
		/* We must not take the lu_sites_guard lock when
		 * __GFP_FS is *not* set because of the deadlock
		 * possibility detailed above. Additionally,
		 * since we cannot determine the number of
		 * objects in the cache without taking this
		 * lock, we're in a particularly tough spot. As
		 * a result, we'll just lie and say our cache is
		 * empty. This _should_ be ok, as we can't
		 * reclaim objects when __GFP_FS is *not* set
		 * anyways.
		 */
		return SHRINK_STOP;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		freed = lu_site_purge(&lu_shrink_env, s, remain);
		remain -= freed;
		/*
		 * Move just shrunk site to the tail of site list to
		 * assure shrinking fairness.
		 */
		list_move_tail(&s->ls_linkage, &splice);
	}
	list_splice(&splice, lu_sites.prev);
	mutex_unlock(&lu_sites_guard);

	return sc->nr_to_scan - remain;
}
/**
 * Environment to be used in debugger, contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
		      void *unused, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	vprintk(format, args);
	va_end(args);
	return 0;
}
= {
1932 .count_objects
= lu_cache_shrink_count
,
1933 .scan_objects
= lu_cache_shrink_scan
,
1934 .seeks
= DEFAULT_SEEKS
,
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
	int result;

	CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

	result = lu_ref_global_init();
	if (result != 0)
		return result;

	LU_CONTEXT_KEY_INIT(&lu_global_key);
	result = lu_context_key_register(&lu_global_key);
	if (result != 0)
		return result;

	/*
	 * At this level, we don't know what tags are needed, so allocate them
	 * conservatively. This should not be too bad, because this
	 * environment is global.
	 */
	mutex_lock(&lu_sites_guard);
	result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
	mutex_unlock(&lu_sites_guard);
	if (result != 0)
		return result;

	/*
	 * seeks estimation: 3 seeks to read a record from oi, one to read
	 * inode, one for ea. Unfortunately setting this high value results in
	 * lu_object/inode cache consuming all the memory.
	 */
	register_shrinker(&lu_site_shrinker);

	return result;
}
/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
	unregister_shrinker(&lu_site_shrinker);
	lu_context_key_degister(&lu_global_key);

	/*
	 * Tear shrinker environment down _after_ de-registering
	 * lu_global_key, because the latter has a value in the former.
	 */
	mutex_lock(&lu_sites_guard);
	lu_env_fini(&lu_shrink_env);
	mutex_unlock(&lu_sites_guard);

	lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
	struct lprocfs_counter ret;

	lprocfs_stats_collect(stats, idx, &ret);
	return (__u32)ret.lc_count;
}
/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
{
	lu_site_stats_t stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
			  stats.lss_busy,
			  stats.lss_total,
			  stats.lss_populated,
			  CFS_HASH_NHLIST(s->ls_obj_hash),
			  stats.lss_max_search,
			  ls_stats_read(s->ls_stats, LU_SS_CREATED),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
	int result;
	struct lu_kmem_descr *iter = caches;

	for (result = 0; iter->ckd_cache != NULL; ++iter) {
		*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
						     iter->ckd_size,
						     0, 0, NULL);
		if (*iter->ckd_cache == NULL) {
			result = -ENOMEM;
			/* free all previously allocated caches */
			lu_kmem_fini(caches);
			break;
		}
	}
	return result;
}
EXPORT_SYMBOL(lu_kmem_init);
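/*
 * Illustrative sketch (not part of the original file; the "foo" names are
 * hypothetical): callers pass a NULL-terminated descriptor array, e.g.:
 *
 *	static struct kmem_cache *foo_object_kmem;
 *
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_object_kmem,
 *			.ckd_name  = "foo_object_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *
 *	rc = lu_kmem_init(foo_caches);
 *	...
 *	lu_kmem_fini(foo_caches);
 */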
/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
	for (; caches->ckd_cache != NULL; ++caches) {
		if (*caches->ckd_cache != NULL) {
			kmem_cache_destroy(*caches->ckd_cache);
			*caches->ckd_cache = NULL;
		}
	}
}
EXPORT_SYMBOL(lu_kmem_fini);
/**
 * Temporary solution to be able to assign the fid in ->do_create(),
 * until we have fully functional OST fids.
 */
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
			  const struct lu_fid *fid)
{
	struct lu_site *s = o->lo_dev->ld_site;
	struct lu_fid *old = &o->lo_header->loh_fid;
	struct lu_site_bkt_data *bkt;
	struct lu_object *shadow;
	wait_queue_t waiter;
	struct cfs_hash *hs;
	struct cfs_hash_bd bd;
	__u64 version = 0;

	LASSERT(fid_is_zero(old));

	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
	shadow = htable_lookup(s, &bd, fid, &waiter, &version);
	/* supposed to be unique */
	LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
	*old = *fid;
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
}
EXPORT_SYMBOL(lu_object_assign_fid);
/**
 * Allocates an object with a zero (unassigned) fid.
 * XXX: temporary solution to be able to assign the fid in ->do_create(),
 *      until we have fully functional OST fids.
 */
struct lu_object *lu_object_anon(const struct lu_env *env,
				 struct lu_device *dev,
				 const struct lu_object_conf *conf)
{
	struct lu_fid fid;
	struct lu_object *o;

	fid_zero(&fid);
	o = lu_object_alloc(env, dev, &fid, conf);

	return o;
}
EXPORT_SYMBOL(lu_object_anon);
struct lu_buf LU_BUF_NULL = {
	.lb_buf = NULL,
	.lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);

void lu_buf_free(struct lu_buf *buf)
{
	LASSERT(buf);
	if (buf->lb_buf) {
		LASSERT(buf->lb_len > 0);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
		buf->lb_buf = NULL;
		buf->lb_len = 0;
	}
}
EXPORT_SYMBOL(lu_buf_free);
void lu_buf_alloc(struct lu_buf *buf, int size)
{
	LASSERT(buf);
	LASSERT(buf->lb_buf == NULL);
	LASSERT(buf->lb_len == 0);
	OBD_ALLOC_LARGE(buf->lb_buf, size);
	if (likely(buf->lb_buf))
		buf->lb_len = size;
}
EXPORT_SYMBOL(lu_buf_alloc);

void lu_buf_realloc(struct lu_buf *buf, int size)
{
	lu_buf_free(buf);
	lu_buf_alloc(buf, size);
}
EXPORT_SYMBOL(lu_buf_realloc);
struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
{
	if (buf->lb_buf == NULL && buf->lb_len == 0)
		lu_buf_alloc(buf, len);

	if ((len > buf->lb_len) && (buf->lb_buf != NULL))
		lu_buf_realloc(buf, len);

	return buf;
}
EXPORT_SYMBOL(lu_buf_check_and_alloc);
/**
 * Increase the size of the \a buf.
 * Preserves old data in the buffer; the old buffer remains unchanged on
 * error.
 * \retval 0 or -ENOMEM
 */
int lu_buf_check_and_grow(struct lu_buf *buf, int len)
{
	char *ptr;

	if (len <= buf->lb_len)
		return 0;

	OBD_ALLOC_LARGE(ptr, len);
	if (ptr == NULL)
		return -ENOMEM;

	/* Free the old buf */
	if (buf->lb_buf != NULL) {
		memcpy(ptr, buf->lb_buf, buf->lb_len);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
	}

	buf->lb_buf = ptr;
	buf->lb_len = len;
	return 0;
}
EXPORT_SYMBOL(lu_buf_check_and_grow);
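/*
 * Illustrative sketch (not part of the original file): a lu_buf starts out
 * empty and is sized on demand, e.g.:
 *
 *	struct lu_buf buf = LU_BUF_NULL;
 *
 *	lu_buf_check_and_alloc(&buf, 4096);	// first use: allocates
 *	...
 *	lu_buf_check_and_grow(&buf, 8192);	// grow, preserving data
 *	lu_buf_free(&buf);
 */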