/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Extent Lock.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <linux/list.h>
#include <cl_object.h>
#include "cl_internal.h"
/** Lock class of cl_lock::cll_guard */
static struct lock_class_key cl_lock_guard_class;
static struct kmem_cache *cl_lock_kmem;

static struct lu_kmem_descr cl_lock_caches[] = {
        {
                .ckd_cache = &cl_lock_kmem,
                .ckd_name  = "cl_lock_kmem",
                .ckd_size  = sizeof (struct cl_lock)
        },
        {
                .ckd_cache = NULL
        }
};
#define CS_LOCK_INC(o, item)
#define CS_LOCK_DEC(o, item)
#define CS_LOCKSTATE_INC(o, state)
#define CS_LOCKSTATE_DEC(o, state)
/**
 * Basic lock invariant that is maintained at all times. Caller either has a
 * reference to \a lock, or somehow assures that \a lock cannot be freed.
 *
 * \see cl_lock_invariant()
 */
static int cl_lock_invariant_trusted(const struct lu_env *env,
                                     const struct cl_lock *lock)
{
        return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
               atomic_read(&lock->cll_ref) >= lock->cll_holds &&
               lock->cll_holds >= lock->cll_users &&
               lock->cll_holds >= 0 &&
               lock->cll_users >= 0 &&
               lock->cll_depth >= 0;
}
/**
 * Stronger lock invariant, checking that caller has a reference on a lock.
 *
 * \see cl_lock_invariant_trusted()
 */
static int cl_lock_invariant(const struct lu_env *env,
                             const struct cl_lock *lock)
{
        int result;

        result = atomic_read(&lock->cll_ref) > 0 &&
                 cl_lock_invariant_trusted(env, lock);
        if (!result && env != NULL)
                CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken");
        return result;
}
/**
 * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
 */
static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
{
        return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
}
/**
 * Returns a set of counters for this lock, depending on a lock nesting.
 */
static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
                                                   const struct cl_lock *lock)
{
        struct cl_thread_info *info;
        enum clt_nesting_level nesting;

        info = cl_env_info(env);
        nesting = cl_lock_nesting(lock);
        LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
        return &info->clt_counters[nesting];
}
static void cl_lock_trace0(int level, const struct lu_env *env,
                           const char *prefix, const struct cl_lock *lock,
                           const char *func, const int line)
{
        struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);

        CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)"
                      "(%p/%d/%d) at %s():%d\n",
               prefix, lock, atomic_read(&lock->cll_ref),
               lock->cll_guarder, lock->cll_depth,
               lock->cll_state, lock->cll_error, lock->cll_holds,
               lock->cll_users, lock->cll_flags,
               env, h->coh_nesting, cl_lock_nr_mutexed(env),
               func, line);
}

#define cl_lock_trace(level, env, prefix, lock)                         \
        cl_lock_trace0(level, env, prefix, lock, __FUNCTION__, __LINE__)

#define RETIP ((unsigned long)__builtin_return_address(0))
#ifdef CONFIG_LOCKDEP
static struct lock_class_key cl_lock_key;

static void cl_lock_lockdep_init(struct cl_lock *lock)
{
        lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
}

static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
        lock_map_acquire(&lock->dep_map);
}

static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{
        cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
        lock_release(&lock->dep_map, 0, RETIP);
}

#else /* !CONFIG_LOCKDEP */

static void cl_lock_lockdep_init(struct cl_lock *lock)
{}
static void cl_lock_lockdep_acquire(const struct lu_env *env,
                                    struct cl_lock *lock, __u32 enqflags)
{}
static void cl_lock_lockdep_release(const struct lu_env *env,
                                    struct cl_lock *lock)
{}

#endif /* !CONFIG_LOCKDEP */
/**
 * Adds lock slice to the compound lock.
 *
 * This is called by cl_object_operations::coo_lock_init() methods to add a
 * per-layer state to the lock. New state is added at the end of
 * cl_lock::cll_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_req_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
                       struct cl_object *obj,
                       const struct cl_lock_operations *ops)
{
        slice->cls_lock = lock;
        list_add_tail(&slice->cls_linkage, &lock->cll_layers);
        slice->cls_obj = obj;
        slice->cls_ops = ops;
}
EXPORT_SYMBOL(cl_lock_slice_add);
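
/*
 * Usage sketch (illustrative; the "foo_*" names are hypothetical): a layer's
 * cl_object_operations::coo_lock_init() method typically allocates its
 * private slice and attaches it with cl_lock_slice_add():
 *
 *      static int foo_lock_init(const struct lu_env *env,
 *                               struct cl_object *obj, struct cl_lock *lock,
 *                               const struct cl_io *io)
 *      {
 *              struct foo_lock *fl;
 *
 *              OBD_SLAB_ALLOC_PTR_GFP(fl, foo_lock_kmem, __GFP_IO);
 *              if (fl == NULL)
 *                      return -ENOMEM;
 *              cl_lock_slice_add(lock, &fl->fl_cl, obj, &foo_lock_ops);
 *              return 0;
 *      }
 */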
/**
 * Returns true iff a lock with the mode \a has provides at least the same
 * guarantees as a lock with the mode \a need.
 */
int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
{
        LINVRNT(need == CLM_READ || need == CLM_WRITE ||
                need == CLM_PHANTOM || need == CLM_GROUP);
        LINVRNT(has == CLM_READ || has == CLM_WRITE ||
                has == CLM_PHANTOM || has == CLM_GROUP);
        CLASSERT(CLM_PHANTOM < CLM_READ);
        CLASSERT(CLM_READ < CLM_WRITE);
        CLASSERT(CLM_WRITE < CLM_GROUP);

        if (has != CLM_GROUP)
                return need <= has;
        else
                return need == has;
}
EXPORT_SYMBOL(cl_lock_mode_match);
/**
 * Returns true iff extent portions of lock descriptions match.
 */
int cl_lock_ext_match(const struct cl_lock_descr *has,
                      const struct cl_lock_descr *need)
{
        return
                has->cld_start <= need->cld_start &&
                has->cld_end >= need->cld_end &&
                cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
                (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
}
EXPORT_SYMBOL(cl_lock_ext_match);
/**
 * Returns true iff a lock with the description \a has provides at least the
 * same guarantees as a lock with the description \a need.
 */
int cl_lock_descr_match(const struct cl_lock_descr *has,
                        const struct cl_lock_descr *need)
{
        return
                cl_object_same(has->cld_obj, need->cld_obj) &&
                cl_lock_ext_match(has, need);
}
EXPORT_SYMBOL(cl_lock_descr_match);
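
/*
 * For example (illustrative values): a cached lock described by
 * { cld_mode = CLM_WRITE, cld_start = 0, cld_end = CL_PAGE_EOF } satisfies a
 * need of { CLM_READ, 1, 2 } on the same object, because the extent covers
 * the needed pages and CLM_WRITE provides at least the guarantees of
 * CLM_READ.
 */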
static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object *obj = lock->cll_descr.cld_obj;

        LINVRNT(!cl_lock_is_mutexed(lock));

        cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
        while (!list_empty(&lock->cll_layers)) {
                struct cl_lock_slice *slice;

                slice = list_entry(lock->cll_layers.next,
                                   struct cl_lock_slice, cls_linkage);
                list_del_init(lock->cll_layers.next);
                slice->cls_ops->clo_fini(env, slice);
        }
        CS_LOCK_DEC(obj, total);
        CS_LOCKSTATE_DEC(obj, lock->cll_state);
        lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
        cl_object_put(env, obj);
        lu_ref_fini(&lock->cll_reference);
        lu_ref_fini(&lock->cll_holders);
        mutex_destroy(&lock->cll_guard);
        OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
}
/**
 * Releases a reference on a lock.
 *
 * When last reference is released, lock is returned to the cache, unless it
 * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
 * immediately.
 *
 * \see cl_object_put(), cl_page_put()
 */
void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_object *obj;

        LINVRNT(cl_lock_invariant(env, lock));
        obj = lock->cll_descr.cld_obj;
        LINVRNT(obj != NULL);

        CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);

        if (atomic_dec_and_test(&lock->cll_ref)) {
                if (lock->cll_state == CLS_FREEING) {
                        LASSERT(list_empty(&lock->cll_linkage));
                        cl_lock_free(env, lock);
                }
                CS_LOCK_DEC(obj, busy);
        }
}
EXPORT_SYMBOL(cl_lock_put);
/**
 * Acquires an additional reference to a lock.
 *
 * This can be called only by caller already possessing a reference to \a
 * lock.
 *
 * \see cl_object_get(), cl_page_get()
 */
void cl_lock_get(struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(NULL, lock));
        CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);
        atomic_inc(&lock->cll_ref);
}
EXPORT_SYMBOL(cl_lock_get);
/**
 * Acquires a reference to a lock.
 *
 * This is much like cl_lock_get(), except that this function can be used to
 * acquire initial reference to the cached lock. Caller has to deal with all
 * possible races. Use with care!
 *
 * \see cl_page_get_trust()
 */
void cl_lock_get_trust(struct cl_lock *lock)
{
        CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
               atomic_read(&lock->cll_ref), lock, RETIP);
        if (atomic_inc_return(&lock->cll_ref) == 1)
                CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
}
EXPORT_SYMBOL(cl_lock_get_trust);
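
/*
 * Reference counting sketch (illustrative): every cl_lock_get() or
 * successful lookup must eventually be balanced by cl_lock_put():
 *
 *      cl_lock_get(lock);
 *      ... use the lock ...
 *      cl_lock_put(env, lock);
 *
 * cl_lock_get_trust() is meant for code that already knows the lock cannot
 * be freed concurrently, e.g. a lookup walking head->coh_locks under
 * coh_lock_guard.
 */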
/**
 * Helper function destroying the lock that wasn't completely initialized.
 *
 * Other threads can acquire references to the top-lock through its
 * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
 */
static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
{
        cl_lock_mutex_get(env, lock);
        cl_lock_cancel(env, lock);
        cl_lock_delete(env, lock);
        cl_lock_mutex_put(env, lock);
        cl_lock_put(env, lock);
}
static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
                                     struct cl_object *obj,
                                     const struct cl_io *io,
                                     const struct cl_lock_descr *descr)
{
        struct cl_lock *lock;
        struct lu_object_header *head;

        OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
        if (lock != NULL) {
                atomic_set(&lock->cll_ref, 1);
                lock->cll_descr = *descr;
                lock->cll_state = CLS_NEW;
                cl_object_get(obj);
                lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
                                                      "cl_lock", lock);
                INIT_LIST_HEAD(&lock->cll_layers);
                INIT_LIST_HEAD(&lock->cll_linkage);
                INIT_LIST_HEAD(&lock->cll_inclosure);
                lu_ref_init(&lock->cll_reference);
                lu_ref_init(&lock->cll_holders);
                mutex_init(&lock->cll_guard);
                lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
                init_waitqueue_head(&lock->cll_wq);
                head = obj->co_lu.lo_header;
                CS_LOCKSTATE_INC(obj, CLS_NEW);
                CS_LOCK_INC(obj, total);
                CS_LOCK_INC(obj, create);
                cl_lock_lockdep_init(lock);
                list_for_each_entry(obj, &head->loh_layers,
                                    co_lu.lo_linkage) {
                        int err;

                        err = obj->co_ops->coo_lock_init(env, obj, lock, io);
                        if (err != 0) {
                                cl_lock_finish(env, lock);
                                lock = ERR_PTR(err);
                                break;
                        }
                }
        } else
                lock = ERR_PTR(-ENOMEM);
        return lock;
}
/**
 * Transfer the lock into INTRANSIT state and return the original state.
 *
 * \pre  state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
 * \post state: CLS_INTRANSIT
 */
enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
                                     struct cl_lock *lock)
{
        enum cl_lock_state state = lock->cll_state;

        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(state != CLS_INTRANSIT);
        LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
                 "Malformed lock state %d.\n", state);

        cl_lock_state_set(env, lock, CLS_INTRANSIT);
        lock->cll_intransit_owner = current;
        cl_lock_hold_add(env, lock, "intransit", current);
        return state;
}
EXPORT_SYMBOL(cl_lock_intransit);
/**
 * Exit the intransit state and restore the lock state to the original state
 */
void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
                       enum cl_lock_state state)
{
        LASSERT(cl_lock_is_mutexed(lock));
        LASSERT(lock->cll_state == CLS_INTRANSIT);
        LASSERT(state != CLS_INTRANSIT);
        LASSERT(lock->cll_intransit_owner == current);

        lock->cll_intransit_owner = NULL;
        cl_lock_state_set(env, lock, state);
        cl_lock_unhold(env, lock, "intransit", current);
}
EXPORT_SYMBOL(cl_lock_extransit);
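
/*
 * Typical pairing (illustrative): a thread that must perform a blocking
 * transition takes the lock out of circulation first and restores it when
 * done:
 *
 *      enum cl_lock_state state;
 *
 *      state = cl_lock_intransit(env, lock);
 *      ... blocking work; other threads see CLS_INTRANSIT and wait ...
 *      cl_lock_extransit(env, lock, state);
 */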
/**
 * Checks whether the lock is in the INTRANSIT state and is owned by another
 * thread.
 */
int cl_lock_is_intransit(struct cl_lock *lock)
{
        LASSERT(cl_lock_is_mutexed(lock));
        return lock->cll_state == CLS_INTRANSIT &&
               lock->cll_intransit_owner != current;
}
EXPORT_SYMBOL(cl_lock_is_intransit);
/**
 * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
 * truncate and O_APPEND cannot be reused for read/non-append-write, as they
 * cover multiple stripes and can trigger cascading timeouts.
 */
static int cl_lock_fits_into(const struct lu_env *env,
                             const struct cl_lock *lock,
                             const struct cl_lock_descr *need,
                             const struct cl_io *io)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(env, lock));
        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_ops->clo_fits_into != NULL &&
                    !slice->cls_ops->clo_fits_into(env, slice, need, io))
                        return 0;
        }
        return 1;
}
static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
                                      struct cl_object *obj,
                                      const struct cl_io *io,
                                      const struct cl_lock_descr *need)
{
        struct cl_lock *lock;
        struct cl_object_header *head;

        head = cl_object_header(obj);
        LINVRNT(spin_is_locked(&head->coh_lock_guard));
        CS_LOCK_INC(obj, lookup);
        list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
                int matched;

                matched = cl_lock_ext_match(&lock->cll_descr, need) &&
                          lock->cll_state < CLS_FREEING &&
                          lock->cll_error == 0 &&
                          !(lock->cll_flags & CLF_CANCELLED) &&
                          cl_lock_fits_into(env, lock, need, io);
                CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
                       PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
                       matched);
                if (matched) {
                        cl_lock_get_trust(lock);
                        CS_LOCK_INC(obj, hit);
                        return lock;
                }
        }
        return NULL;
}
/**
 * Returns a lock matching description \a need.
 *
 * This is the main entry point into the cl_lock caching interface. First, a
 * cache (implemented as a per-object linked list) is consulted. If lock is
 * found there, it is returned immediately. Otherwise new lock is allocated
 * and returned. In any case, additional reference to lock is acquired.
 *
 * \see cl_object_find(), cl_page_find()
 */
static struct cl_lock *cl_lock_find(const struct lu_env *env,
                                    const struct cl_io *io,
                                    const struct cl_lock_descr *need)
{
        struct cl_object_header *head;
        struct cl_object *obj;
        struct cl_lock *lock;

        obj = need->cld_obj;
        head = cl_object_header(obj);

        spin_lock(&head->coh_lock_guard);
        lock = cl_lock_lookup(env, obj, io, need);
        spin_unlock(&head->coh_lock_guard);

        if (lock == NULL) {
                lock = cl_lock_alloc(env, obj, io, need);
                if (!IS_ERR(lock)) {
                        struct cl_lock *ghost;

                        spin_lock(&head->coh_lock_guard);
                        ghost = cl_lock_lookup(env, obj, io, need);
                        if (ghost == NULL) {
                                list_add_tail(&lock->cll_linkage,
                                              &head->coh_locks);
                                spin_unlock(&head->coh_lock_guard);
                                CS_LOCK_INC(obj, busy);
                        } else {
                                spin_unlock(&head->coh_lock_guard);
                                /*
                                 * Other threads can acquire references to the
                                 * top-lock through its sub-locks. Hence, it
                                 * cannot be cl_lock_free()-ed immediately.
                                 */
                                cl_lock_finish(env, lock);
                                lock = ghost;
                        }
                }
        }
        return lock;
}
/**
 * Returns existing lock matching given description. This is similar to
 * cl_lock_find() except that no new lock is created, and returned lock is
 * guaranteed to be in enum cl_lock_state::CLS_HELD state.
 */
struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
                             const struct cl_lock_descr *need,
                             const char *scope, const void *source)
{
        struct cl_object_header *head;
        struct cl_object *obj;
        struct cl_lock *lock;

        obj = need->cld_obj;
        head = cl_object_header(obj);

        do {
                spin_lock(&head->coh_lock_guard);
                lock = cl_lock_lookup(env, obj, io, need);
                spin_unlock(&head->coh_lock_guard);
                if (lock == NULL)
                        return NULL;

                cl_lock_mutex_get(env, lock);
                if (lock->cll_state == CLS_INTRANSIT)
                        /* Don't care return value. */
                        cl_lock_state_wait(env, lock);
                if (lock->cll_state == CLS_FREEING) {
                        cl_lock_mutex_put(env, lock);
                        cl_lock_put(env, lock);
                        lock = NULL;
                }
        } while (lock == NULL);

        cl_lock_hold_add(env, lock, scope, source);
        cl_lock_user_add(env, lock);
        if (lock->cll_state == CLS_CACHED)
                cl_use_try(env, lock, 1);
        if (lock->cll_state == CLS_HELD) {
                cl_lock_mutex_put(env, lock);
                cl_lock_lockdep_acquire(env, lock, 0);
                cl_lock_put(env, lock);
        } else {
                cl_unuse_try(env, lock);
                cl_lock_unhold(env, lock, scope, source);
                cl_lock_mutex_put(env, lock);
                cl_lock_put(env, lock);
                lock = NULL;
        }

        return lock;
}
EXPORT_SYMBOL(cl_lock_peek);
/**
 * Returns a slice within a lock, corresponding to the given layer in the
 * device stack.
 *
 * \see cl_page_at()
 */
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
                                       const struct lu_device_type *dtype)
{
        const struct cl_lock_slice *slice;

        LINVRNT(cl_lock_invariant_trusted(NULL, lock));

        list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
                if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
                        return slice;
        }
        return NULL;
}
EXPORT_SYMBOL(cl_lock_at);
static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        counters = cl_lock_counters(env, lock);
        lock->cll_depth++;
        counters->ctc_nr_locks_locked++;
        lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
        cl_lock_trace(D_TRACE, env, "got mutex", lock);
}
/**
 * Locks cl_lock object.
 *
 * This is used to manipulate cl_lock fields, and to serialize state
 * transitions in the lock state machine.
 *
 * \post cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_put()
 */
void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
{
        LINVRNT(cl_lock_invariant(env, lock));

        if (lock->cll_guarder == current) {
                LINVRNT(cl_lock_is_mutexed(lock));
                LINVRNT(lock->cll_depth > 0);
        } else {
                struct cl_object_header *hdr;
                struct cl_thread_info *info;
                int i;

                LINVRNT(lock->cll_guarder != current);
                hdr = cl_object_header(lock->cll_descr.cld_obj);
                /*
                 * Check that mutices are taken in the bottom-to-top order.
                 */
                info = cl_env_info(env);
                for (i = 0; i < hdr->coh_nesting; ++i)
                        LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
                mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
                lock->cll_guarder = current;
                LINVRNT(lock->cll_depth == 0);
        }
        cl_lock_mutex_tail(env, lock);
}
EXPORT_SYMBOL(cl_lock_mutex_get);
/**
 * Try-locks cl_lock object.
 *
 * \retval 0 \a lock was successfully locked
 *
 * \retval -EBUSY \a lock cannot be locked right now
 *
 * \post ergo(result == 0, cl_lock_is_mutexed(lock))
 *
 * \see cl_lock_mutex_get()
 */
int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
{
        int result;

        LINVRNT(cl_lock_invariant_trusted(env, lock));

        result = 0;
        if (lock->cll_guarder == current) {
                LINVRNT(lock->cll_depth > 0);
                cl_lock_mutex_tail(env, lock);
        } else if (mutex_trylock(&lock->cll_guard)) {
                LINVRNT(lock->cll_depth == 0);
                lock->cll_guarder = current;
                cl_lock_mutex_tail(env, lock);
        } else {
                result = -EBUSY;
        }
        return result;
}
EXPORT_SYMBOL(cl_lock_mutex_try);
/**
 * Unlocks cl_lock object.
 *
 * \pre cl_lock_is_mutexed(lock)
 *
 * \see cl_lock_mutex_get()
 */
void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
{
        struct cl_thread_counters *counters;

        LINVRNT(cl_lock_invariant(env, lock));
        LINVRNT(cl_lock_is_mutexed(lock));
        LINVRNT(lock->cll_guarder == current);
        LINVRNT(lock->cll_depth > 0);

        counters = cl_lock_counters(env, lock);
        LINVRNT(counters->ctc_nr_locks_locked > 0);

        cl_lock_trace(D_TRACE, env, "put mutex", lock);
        lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
        counters->ctc_nr_locks_locked--;
        if (--lock->cll_depth == 0) {
                lock->cll_guarder = NULL;
                mutex_unlock(&lock->cll_guard);
        }
}
EXPORT_SYMBOL(cl_lock_mutex_put);
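
/*
 * Usage sketch (illustrative): fields of struct cl_lock and state machine
 * transitions are manipulated only under the lock mutex:
 *
 *      cl_lock_mutex_get(env, lock);
 *      if (lock->cll_state == CLS_CACHED)
 *              cl_lock_state_set(env, lock, CLS_NEW);
 *      cl_lock_mutex_put(env, lock);
 */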
/**
 * Returns true iff lock's mutex is owned by the current thread.
 */
int cl_lock_is_mutexed(struct cl_lock *lock)
{
        return lock->cll_guarder == current;
}
EXPORT_SYMBOL(cl_lock_is_mutexed);
/**
 * Returns number of cl_lock mutices held by the current thread (environment).
 */
int cl_lock_nr_mutexed(const struct lu_env *env)
{
        struct cl_thread_info *info;
        int i;
        int locked;

        /*
         * NOTE: if summation across all nesting levels (currently 2) proves
         *       too expensive, a summary counter can be added to
         *       struct cl_thread_info.
         */
        info = cl_env_info(env);
        for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
                locked += info->clt_counters[i].ctc_nr_locks_locked;
        return locked;
}
EXPORT_SYMBOL(cl_lock_nr_mutexed);
783 static void cl_lock_cancel0(const struct lu_env
*env
, struct cl_lock
*lock
)
785 LINVRNT(cl_lock_is_mutexed(lock
));
786 LINVRNT(cl_lock_invariant(env
, lock
));
788 if (!(lock
->cll_flags
& CLF_CANCELLED
)) {
789 const struct cl_lock_slice
*slice
;
791 lock
->cll_flags
|= CLF_CANCELLED
;
792 list_for_each_entry_reverse(slice
, &lock
->cll_layers
,
794 if (slice
->cls_ops
->clo_cancel
!= NULL
)
795 slice
->cls_ops
->clo_cancel(env
, slice
);
801 static void cl_lock_delete0(const struct lu_env
*env
, struct cl_lock
*lock
)
803 struct cl_object_header
*head
;
804 const struct cl_lock_slice
*slice
;
806 LINVRNT(cl_lock_is_mutexed(lock
));
807 LINVRNT(cl_lock_invariant(env
, lock
));
810 if (lock
->cll_state
< CLS_FREEING
) {
811 LASSERT(lock
->cll_state
!= CLS_INTRANSIT
);
812 cl_lock_state_set(env
, lock
, CLS_FREEING
);
814 head
= cl_object_header(lock
->cll_descr
.cld_obj
);
816 spin_lock(&head
->coh_lock_guard
);
817 list_del_init(&lock
->cll_linkage
);
818 spin_unlock(&head
->coh_lock_guard
);
821 * From now on, no new references to this lock can be acquired
822 * by cl_lock_lookup().
824 list_for_each_entry_reverse(slice
, &lock
->cll_layers
,
826 if (slice
->cls_ops
->clo_delete
!= NULL
)
827 slice
->cls_ops
->clo_delete(env
, slice
);
830 * From now on, no new references to this lock can be acquired
831 * by layer-specific means (like a pointer from struct
832 * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
835 * Lock will be finally freed in cl_lock_put() when last of
836 * existing references goes away.
843 * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
844 * top-lock (nesting == 0) accounts for this modification in the per-thread
845 * debugging counters. Sub-lock holds can be released by a thread different
846 * from one that acquired it.
848 static void cl_lock_hold_mod(const struct lu_env
*env
, struct cl_lock
*lock
,
851 struct cl_thread_counters
*counters
;
852 enum clt_nesting_level nesting
;
854 lock
->cll_holds
+= delta
;
855 nesting
= cl_lock_nesting(lock
);
856 if (nesting
== CNL_TOP
) {
857 counters
= &cl_env_info(env
)->clt_counters
[CNL_TOP
];
858 counters
->ctc_nr_held
+= delta
;
859 LASSERT(counters
->ctc_nr_held
>= 0);
864 * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
865 * cl_lock_hold_mod() for the explanation of the debugging code.
867 static void cl_lock_used_mod(const struct lu_env
*env
, struct cl_lock
*lock
,
870 struct cl_thread_counters
*counters
;
871 enum clt_nesting_level nesting
;
873 lock
->cll_users
+= delta
;
874 nesting
= cl_lock_nesting(lock
);
875 if (nesting
== CNL_TOP
) {
876 counters
= &cl_env_info(env
)->clt_counters
[CNL_TOP
];
877 counters
->ctc_nr_used
+= delta
;
878 LASSERT(counters
->ctc_nr_used
>= 0);
882 void cl_lock_hold_release(const struct lu_env
*env
, struct cl_lock
*lock
,
883 const char *scope
, const void *source
)
885 LINVRNT(cl_lock_is_mutexed(lock
));
886 LINVRNT(cl_lock_invariant(env
, lock
));
887 LASSERT(lock
->cll_holds
> 0);
890 cl_lock_trace(D_DLMTRACE
, env
, "hold release lock", lock
);
891 lu_ref_del(&lock
->cll_holders
, scope
, source
);
892 cl_lock_hold_mod(env
, lock
, -1);
893 if (lock
->cll_holds
== 0) {
894 CL_LOCK_ASSERT(lock
->cll_state
!= CLS_HELD
, env
, lock
);
895 if (lock
->cll_descr
.cld_mode
== CLM_PHANTOM
||
896 lock
->cll_descr
.cld_mode
== CLM_GROUP
||
897 lock
->cll_state
!= CLS_CACHED
)
899 * If lock is still phantom or grouplock when user is
900 * done with it---destroy the lock.
902 lock
->cll_flags
|= CLF_CANCELPEND
|CLF_DOOMED
;
903 if (lock
->cll_flags
& CLF_CANCELPEND
) {
904 lock
->cll_flags
&= ~CLF_CANCELPEND
;
905 cl_lock_cancel0(env
, lock
);
907 if (lock
->cll_flags
& CLF_DOOMED
) {
908 /* no longer doomed: it's dead... Jim. */
909 lock
->cll_flags
&= ~CLF_DOOMED
;
910 cl_lock_delete0(env
, lock
);
915 EXPORT_SYMBOL(cl_lock_hold_release
);
918 * Waits until lock state is changed.
920 * This function is called with cl_lock mutex locked, atomically releases
921 * mutex and goes to sleep, waiting for a lock state change (signaled by
922 * cl_lock_signal()), and re-acquires the mutex before return.
924 * This function is used to wait until lock state machine makes some progress
925 * and to emulate synchronous operations on top of asynchronous lock
928 * \retval -EINTR wait was interrupted
930 * \retval 0 wait wasn't interrupted
932 * \pre cl_lock_is_mutexed(lock)
934 * \see cl_lock_signal()
936 int cl_lock_state_wait(const struct lu_env
*env
, struct cl_lock
*lock
)
943 LINVRNT(cl_lock_is_mutexed(lock
));
944 LINVRNT(cl_lock_invariant(env
, lock
));
945 LASSERT(lock
->cll_depth
== 1);
946 LASSERT(lock
->cll_state
!= CLS_FREEING
); /* too late to wait */
948 cl_lock_trace(D_DLMTRACE
, env
, "state wait lock", lock
);
949 result
= lock
->cll_error
;
951 /* To avoid being interrupted by the 'non-fatal' signals
952 * (SIGCHLD, for instance), we'd block them temporarily.
954 blocked
= cfs_block_sigsinv(LUSTRE_FATAL_SIGS
);
956 init_waitqueue_entry_current(&waiter
);
957 add_wait_queue(&lock
->cll_wq
, &waiter
);
958 set_current_state(TASK_INTERRUPTIBLE
);
959 cl_lock_mutex_put(env
, lock
);
961 LASSERT(cl_lock_nr_mutexed(env
) == 0);
963 /* Returning ERESTARTSYS instead of EINTR so syscalls
964 * can be restarted if signals are pending here */
965 result
= -ERESTARTSYS
;
966 if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR
))) {
967 waitq_wait(&waiter
, TASK_INTERRUPTIBLE
);
968 if (!cfs_signal_pending())
972 cl_lock_mutex_get(env
, lock
);
973 set_current_state(TASK_RUNNING
);
974 remove_wait_queue(&lock
->cll_wq
, &waiter
);
976 /* Restore old blocked signals */
977 cfs_restore_sigs(blocked
);
981 EXPORT_SYMBOL(cl_lock_state_wait
);
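
/*
 * Usage sketch (illustrative): callers emulate a synchronous operation by
 * retrying a non-blocking "try" step and sleeping in cl_lock_state_wait()
 * between attempts, e.g.:
 *
 *      while ((result = cl_wait_try(env, lock)) == CLO_WAIT) {
 *              result = cl_lock_state_wait(env, lock);
 *              if (result != 0)
 *                      break;
 *      }
 */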
983 static void cl_lock_state_signal(const struct lu_env
*env
, struct cl_lock
*lock
,
984 enum cl_lock_state state
)
986 const struct cl_lock_slice
*slice
;
989 LINVRNT(cl_lock_is_mutexed(lock
));
990 LINVRNT(cl_lock_invariant(env
, lock
));
992 list_for_each_entry(slice
, &lock
->cll_layers
, cls_linkage
)
993 if (slice
->cls_ops
->clo_state
!= NULL
)
994 slice
->cls_ops
->clo_state(env
, slice
, state
);
995 wake_up_all(&lock
->cll_wq
);
1000 * Notifies waiters that lock state changed.
1002 * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
1003 * layers about state change by calling cl_lock_operations::clo_state()
1006 void cl_lock_signal(const struct lu_env
*env
, struct cl_lock
*lock
)
1009 cl_lock_trace(D_DLMTRACE
, env
, "state signal lock", lock
);
1010 cl_lock_state_signal(env
, lock
, lock
->cll_state
);
1013 EXPORT_SYMBOL(cl_lock_signal
);
1016 * Changes lock state.
 * This function is invoked to notify layers that the lock state has changed,
 * possibly as a result of an asynchronous event such as call-back reception.
1021 * \post lock->cll_state == state
1023 * \see cl_lock_operations::clo_state()
1025 void cl_lock_state_set(const struct lu_env
*env
, struct cl_lock
*lock
,
1026 enum cl_lock_state state
)
1029 LASSERT(lock
->cll_state
<= state
||
1030 (lock
->cll_state
== CLS_CACHED
&&
1031 (state
== CLS_HELD
|| /* lock found in cache */
1032 state
== CLS_NEW
|| /* sub-lock canceled */
1033 state
== CLS_INTRANSIT
)) ||
1034 /* lock is in transit state */
1035 lock
->cll_state
== CLS_INTRANSIT
);
1037 if (lock
->cll_state
!= state
) {
1038 CS_LOCKSTATE_DEC(lock
->cll_descr
.cld_obj
, lock
->cll_state
);
1039 CS_LOCKSTATE_INC(lock
->cll_descr
.cld_obj
, state
);
1041 cl_lock_state_signal(env
, lock
, state
);
1042 lock
->cll_state
= state
;
1046 EXPORT_SYMBOL(cl_lock_state_set
);
1048 static int cl_unuse_try_internal(const struct lu_env
*env
, struct cl_lock
*lock
)
1050 const struct cl_lock_slice
*slice
;
1056 LINVRNT(cl_lock_is_mutexed(lock
));
1057 LINVRNT(cl_lock_invariant(env
, lock
));
1058 LASSERT(lock
->cll_state
== CLS_INTRANSIT
);
1061 list_for_each_entry_reverse(slice
, &lock
->cll_layers
,
1063 if (slice
->cls_ops
->clo_unuse
!= NULL
) {
1064 result
= slice
->cls_ops
->clo_unuse(env
, slice
);
1069 LASSERT(result
!= -ENOSYS
);
1070 } while (result
== CLO_REPEAT
);
 * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
 * cl_lock_operations::clo_use() top-to-bottom to notify layers.
 * If @atomic == 1, the lock must be unused on failure, so that the whole
 * "use" step remains atomic.
1081 int cl_use_try(const struct lu_env
*env
, struct cl_lock
*lock
, int atomic
)
1083 const struct cl_lock_slice
*slice
;
1085 enum cl_lock_state state
;
1088 cl_lock_trace(D_DLMTRACE
, env
, "use lock", lock
);
1090 LASSERT(lock
->cll_state
== CLS_CACHED
);
1091 if (lock
->cll_error
)
1092 RETURN(lock
->cll_error
);
1095 state
= cl_lock_intransit(env
, lock
);
1096 list_for_each_entry(slice
, &lock
->cll_layers
, cls_linkage
) {
1097 if (slice
->cls_ops
->clo_use
!= NULL
) {
1098 result
= slice
->cls_ops
->clo_use(env
, slice
);
1103 LASSERT(result
!= -ENOSYS
);
1105 LASSERTF(lock
->cll_state
== CLS_INTRANSIT
, "Wrong state %d.\n",
1111 if (result
== -ESTALE
) {
                         * -ESTALE means the sublock is being cancelled
                         * at this time; set the lock state to CLS_NEW
                         * here and ask the caller to repeat.
1118 result
= CLO_REPEAT
;
1121 /* @atomic means back-off-on-failure. */
1124 rc
= cl_unuse_try_internal(env
, lock
);
1125 /* Vet the results. */
1126 if (rc
< 0 && result
> 0)
1131 cl_lock_extransit(env
, lock
, state
);
1134 EXPORT_SYMBOL(cl_use_try
);
1137 * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
1140 static int cl_enqueue_kick(const struct lu_env
*env
,
1141 struct cl_lock
*lock
,
1142 struct cl_io
*io
, __u32 flags
)
1145 const struct cl_lock_slice
*slice
;
1149 list_for_each_entry(slice
, &lock
->cll_layers
, cls_linkage
) {
1150 if (slice
->cls_ops
->clo_enqueue
!= NULL
) {
1151 result
= slice
->cls_ops
->clo_enqueue(env
,
1157 LASSERT(result
!= -ENOSYS
);
1162 * Tries to enqueue a lock.
1164 * This function is called repeatedly by cl_enqueue() until either lock is
1165 * enqueued, or error occurs. This function does not block waiting for
1166 * networking communication to complete.
1168 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1169 * lock->cll_state == CLS_HELD)
1171 * \see cl_enqueue() cl_lock_operations::clo_enqueue()
1172 * \see cl_lock_state::CLS_ENQUEUED
1174 int cl_enqueue_try(const struct lu_env
*env
, struct cl_lock
*lock
,
1175 struct cl_io
*io
, __u32 flags
)
1180 cl_lock_trace(D_DLMTRACE
, env
, "enqueue lock", lock
);
1182 LINVRNT(cl_lock_is_mutexed(lock
));
1184 result
= lock
->cll_error
;
1188 switch (lock
->cll_state
) {
1190 cl_lock_state_set(env
, lock
, CLS_QUEUING
);
1194 result
= cl_enqueue_kick(env
, lock
, io
, flags
);
1195 /* For AGL case, the cl_lock::cll_state may
1196 * become CLS_HELD already. */
1197 if (result
== 0 && lock
->cll_state
== CLS_QUEUING
)
1198 cl_lock_state_set(env
, lock
, CLS_ENQUEUED
);
1201 LASSERT(cl_lock_is_intransit(lock
));
1205 /* yank lock from the cache. */
1206 result
= cl_use_try(env
, lock
, 0);
1215 * impossible, only held locks with increased
1216 * ->cll_holds can be enqueued, and they cannot be
1221 } while (result
== CLO_REPEAT
);
1224 EXPORT_SYMBOL(cl_enqueue_try
);
1227 * Cancel the conflicting lock found during previous enqueue.
1229 * \retval 0 conflicting lock has been canceled.
1230 * \retval -ve error code.
1232 int cl_lock_enqueue_wait(const struct lu_env
*env
,
1233 struct cl_lock
*lock
,
1236 struct cl_lock
*conflict
;
1240 LASSERT(cl_lock_is_mutexed(lock
));
1241 LASSERT(lock
->cll_state
== CLS_QUEUING
);
1242 LASSERT(lock
->cll_conflict
!= NULL
);
1244 conflict
= lock
->cll_conflict
;
1245 lock
->cll_conflict
= NULL
;
1247 cl_lock_mutex_put(env
, lock
);
1248 LASSERT(cl_lock_nr_mutexed(env
) == 0);
1250 cl_lock_mutex_get(env
, conflict
);
1251 cl_lock_trace(D_DLMTRACE
, env
, "enqueue wait", conflict
);
1252 cl_lock_cancel(env
, conflict
);
1253 cl_lock_delete(env
, conflict
);
1255 while (conflict
->cll_state
!= CLS_FREEING
) {
1256 rc
= cl_lock_state_wait(env
, conflict
);
1260 cl_lock_mutex_put(env
, conflict
);
1261 lu_ref_del(&conflict
->cll_reference
, "cancel-wait", lock
);
1262 cl_lock_put(env
, conflict
);
1265 cl_lock_mutex_get(env
, lock
);
1270 EXPORT_SYMBOL(cl_lock_enqueue_wait
);
1272 static int cl_enqueue_locked(const struct lu_env
*env
, struct cl_lock
*lock
,
1273 struct cl_io
*io
, __u32 enqflags
)
1279 LINVRNT(cl_lock_is_mutexed(lock
));
1280 LINVRNT(cl_lock_invariant(env
, lock
));
1281 LASSERT(lock
->cll_holds
> 0);
1283 cl_lock_user_add(env
, lock
);
1285 result
= cl_enqueue_try(env
, lock
, io
, enqflags
);
1286 if (result
== CLO_WAIT
) {
1287 if (lock
->cll_conflict
!= NULL
)
1288 result
= cl_lock_enqueue_wait(env
, lock
, 1);
1290 result
= cl_lock_state_wait(env
, lock
);
1297 cl_unuse_try(env
, lock
);
1298 LASSERT(ergo(result
== 0 && !(enqflags
& CEF_AGL
),
1299 lock
->cll_state
== CLS_ENQUEUED
||
1300 lock
->cll_state
== CLS_HELD
));
1307 * \pre current thread or io owns a hold on lock.
1309 * \post ergo(result == 0, lock->users increased)
1310 * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1311 * lock->cll_state == CLS_HELD)
1313 int cl_enqueue(const struct lu_env
*env
, struct cl_lock
*lock
,
1314 struct cl_io
*io
, __u32 enqflags
)
1320 cl_lock_lockdep_acquire(env
, lock
, enqflags
);
1321 cl_lock_mutex_get(env
, lock
);
1322 result
= cl_enqueue_locked(env
, lock
, io
, enqflags
);
1323 cl_lock_mutex_put(env
, lock
);
1325 cl_lock_lockdep_release(env
, lock
);
1326 LASSERT(ergo(result
== 0, lock
->cll_state
== CLS_ENQUEUED
||
1327 lock
->cll_state
== CLS_HELD
));
1330 EXPORT_SYMBOL(cl_enqueue
);
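
/*
 * Caller-level sketch (illustrative; error handling elided): the typical
 * sequence combines holding, enqueueing and waiting, and is roughly what
 * cl_lock_request() further down in this file performs:
 *
 *      lock = cl_lock_hold(env, io, need, "user", current);
 *      if (!IS_ERR(lock)) {
 *              rc = cl_enqueue(env, lock, io, need->cld_enq_flags);
 *              if (rc == 0) {
 *                      rc = cl_wait(env, lock);
 *                      ... access the extent described by "need" ...
 *                      cl_unuse(env, lock);
 *              }
 *              cl_lock_release(env, lock, "user", current);
 *      }
 */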
1333 * Tries to unlock a lock.
1335 * This function is called to release underlying resource:
1336 * 1. for top lock, the resource is sublocks it held;
1337 * 2. for sublock, the resource is the reference to dlmlock.
1339 * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
1341 * \see cl_unuse() cl_lock_operations::clo_unuse()
1342 * \see cl_lock_state::CLS_CACHED
1344 int cl_unuse_try(const struct lu_env
*env
, struct cl_lock
*lock
)
1347 enum cl_lock_state state
= CLS_NEW
;
1350 cl_lock_trace(D_DLMTRACE
, env
, "unuse lock", lock
);
1352 if (lock
->cll_users
> 1) {
1353 cl_lock_user_del(env
, lock
);
1357 /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
1358 * underlying resources. */
1359 if (!(lock
->cll_state
== CLS_HELD
|| lock
->cll_state
== CLS_ENQUEUED
)) {
1360 cl_lock_user_del(env
, lock
);
1365 * New lock users (->cll_users) are not protecting unlocking
1366 * from proceeding. From this point, lock eventually reaches
1367 * CLS_CACHED, is reinitialized to CLS_NEW or fails into
1370 state
= cl_lock_intransit(env
, lock
);
1372 result
= cl_unuse_try_internal(env
, lock
);
1373 LASSERT(lock
->cll_state
== CLS_INTRANSIT
);
1374 LASSERT(result
!= CLO_WAIT
);
1375 cl_lock_user_del(env
, lock
);
1376 if (result
== 0 || result
== -ESTALE
) {
1378 * Return lock back to the cache. This is the only
1379 * place where lock is moved into CLS_CACHED state.
1381 * If one of ->clo_unuse() methods returned -ESTALE, lock
1382 * cannot be placed into cache and has to be
1383 * re-initialized. This happens e.g., when a sub-lock was
1384 * canceled while unlocking was in progress.
1386 if (state
== CLS_HELD
&& result
== 0)
1390 cl_lock_extransit(env
, lock
, state
);
                 * Hide the -ESTALE error.
                 * Suppose the lock is a glimpse lock over multiple stripes,
                 * one of its sublocks returned -ENAVAIL and the other
                 * sublocks matched write locks. In this case we cannot mark
                 * the whole lock as failed, because otherwise some of its
                 * sublocks might not be cancelled and some dirty pages would
                 * never be written to the OSTs. -jay
1403 CERROR("result = %d, this is unlikely!\n", result
);
1405 cl_lock_extransit(env
, lock
, state
);
1407 RETURN(result
?: lock
->cll_error
);
1409 EXPORT_SYMBOL(cl_unuse_try
);
1411 static void cl_unuse_locked(const struct lu_env
*env
, struct cl_lock
*lock
)
1416 result
= cl_unuse_try(env
, lock
);
1418 CL_LOCK_DEBUG(D_ERROR
, env
, lock
, "unuse return %d\n", result
);
1426 void cl_unuse(const struct lu_env
*env
, struct cl_lock
*lock
)
1429 cl_lock_mutex_get(env
, lock
);
1430 cl_unuse_locked(env
, lock
);
1431 cl_lock_mutex_put(env
, lock
);
1432 cl_lock_lockdep_release(env
, lock
);
1435 EXPORT_SYMBOL(cl_unuse
);
1438 * Tries to wait for a lock.
1440 * This function is called repeatedly by cl_wait() until either lock is
1441 * granted, or error occurs. This function does not block waiting for network
1442 * communication to complete.
1444 * \see cl_wait() cl_lock_operations::clo_wait()
1445 * \see cl_lock_state::CLS_HELD
1447 int cl_wait_try(const struct lu_env
*env
, struct cl_lock
*lock
)
1449 const struct cl_lock_slice
*slice
;
1453 cl_lock_trace(D_DLMTRACE
, env
, "wait lock try", lock
);
1455 LINVRNT(cl_lock_is_mutexed(lock
));
1456 LINVRNT(cl_lock_invariant(env
, lock
));
1457 LASSERTF(lock
->cll_state
== CLS_QUEUING
||
1458 lock
->cll_state
== CLS_ENQUEUED
||
1459 lock
->cll_state
== CLS_HELD
||
1460 lock
->cll_state
== CLS_INTRANSIT
,
1461 "lock state: %d\n", lock
->cll_state
);
1462 LASSERT(lock
->cll_users
> 0);
1463 LASSERT(lock
->cll_holds
> 0);
1465 result
= lock
->cll_error
;
1469 if (cl_lock_is_intransit(lock
)) {
1474 if (lock
->cll_state
== CLS_HELD
)
1479 list_for_each_entry(slice
, &lock
->cll_layers
, cls_linkage
) {
1480 if (slice
->cls_ops
->clo_wait
!= NULL
) {
1481 result
= slice
->cls_ops
->clo_wait(env
, slice
);
1486 LASSERT(result
!= -ENOSYS
);
1488 LASSERT(lock
->cll_state
!= CLS_INTRANSIT
);
1489 cl_lock_state_set(env
, lock
, CLS_HELD
);
1491 } while (result
== CLO_REPEAT
);
1494 EXPORT_SYMBOL(cl_wait_try
);
1497 * Waits until enqueued lock is granted.
1499 * \pre current thread or io owns a hold on the lock
1500 * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
1501 * lock->cll_state == CLS_HELD)
1503 * \post ergo(result == 0, lock->cll_state == CLS_HELD)
1505 int cl_wait(const struct lu_env
*env
, struct cl_lock
*lock
)
1510 cl_lock_mutex_get(env
, lock
);
1512 LINVRNT(cl_lock_invariant(env
, lock
));
1513 LASSERTF(lock
->cll_state
== CLS_ENQUEUED
|| lock
->cll_state
== CLS_HELD
,
1514 "Wrong state %d \n", lock
->cll_state
);
1515 LASSERT(lock
->cll_holds
> 0);
1518 result
= cl_wait_try(env
, lock
);
1519 if (result
== CLO_WAIT
) {
1520 result
= cl_lock_state_wait(env
, lock
);
1527 cl_unuse_try(env
, lock
);
1528 cl_lock_lockdep_release(env
, lock
);
1530 cl_lock_trace(D_DLMTRACE
, env
, "wait lock", lock
);
1531 cl_lock_mutex_put(env
, lock
);
1532 LASSERT(ergo(result
== 0, lock
->cll_state
== CLS_HELD
));
1535 EXPORT_SYMBOL(cl_wait
);
1538 * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
1541 unsigned long cl_lock_weigh(const struct lu_env
*env
, struct cl_lock
*lock
)
1543 const struct cl_lock_slice
*slice
;
1544 unsigned long pound
;
1545 unsigned long ounce
;
1548 LINVRNT(cl_lock_is_mutexed(lock
));
1549 LINVRNT(cl_lock_invariant(env
, lock
));
1552 list_for_each_entry_reverse(slice
, &lock
->cll_layers
, cls_linkage
) {
1553 if (slice
->cls_ops
->clo_weigh
!= NULL
) {
1554 ounce
= slice
->cls_ops
->clo_weigh(env
, slice
);
1556 if (pound
< ounce
) /* over-weight^Wflow */
1562 EXPORT_SYMBOL(cl_lock_weigh
);
1565 * Notifies layers that lock description changed.
1567 * The server can grant client a lock different from one that was requested
1568 * (e.g., larger in extent). This method is called when actually granted lock
1569 * description becomes known to let layers to accommodate for changed lock
1572 * \see cl_lock_operations::clo_modify()
1574 int cl_lock_modify(const struct lu_env
*env
, struct cl_lock
*lock
,
1575 const struct cl_lock_descr
*desc
)
1577 const struct cl_lock_slice
*slice
;
1578 struct cl_object
*obj
= lock
->cll_descr
.cld_obj
;
1579 struct cl_object_header
*hdr
= cl_object_header(obj
);
1583 cl_lock_trace(D_DLMTRACE
, env
, "modify lock", lock
);
1584 /* don't allow object to change */
1585 LASSERT(obj
== desc
->cld_obj
);
1586 LINVRNT(cl_lock_is_mutexed(lock
));
1587 LINVRNT(cl_lock_invariant(env
, lock
));
1589 list_for_each_entry_reverse(slice
, &lock
->cll_layers
, cls_linkage
) {
1590 if (slice
->cls_ops
->clo_modify
!= NULL
) {
1591 result
= slice
->cls_ops
->clo_modify(env
, slice
, desc
);
1596 CL_LOCK_DEBUG(D_DLMTRACE
, env
, lock
, " -> "DDESCR
"@"DFID
"\n",
1597 PDESCR(desc
), PFID(lu_object_fid(&desc
->cld_obj
->co_lu
)));
1599 * Just replace description in place. Nothing more is needed for
1600 * now. If locks were indexed according to their extent and/or mode,
1601 * that index would have to be updated here.
1603 spin_lock(&hdr
->coh_lock_guard
);
1604 lock
->cll_descr
= *desc
;
1605 spin_unlock(&hdr
->coh_lock_guard
);
1608 EXPORT_SYMBOL(cl_lock_modify
);
1611 * Initializes lock closure with a given origin.
1613 * \see cl_lock_closure
1615 void cl_lock_closure_init(const struct lu_env
*env
,
1616 struct cl_lock_closure
*closure
,
1617 struct cl_lock
*origin
, int wait
)
1619 LINVRNT(cl_lock_is_mutexed(origin
));
1620 LINVRNT(cl_lock_invariant(env
, origin
));
1622 INIT_LIST_HEAD(&closure
->clc_list
);
1623 closure
->clc_origin
= origin
;
1624 closure
->clc_wait
= wait
;
1625 closure
->clc_nr
= 0;
1627 EXPORT_SYMBOL(cl_lock_closure_init
);
1630 * Builds a closure of \a lock.
1632 * Building of a closure consists of adding initial lock (\a lock) into it,
1633 * and calling cl_lock_operations::clo_closure() methods of \a lock. These
1634 * methods might call cl_lock_closure_build() recursively again, adding more
1635 * locks to the closure, etc.
1637 * \see cl_lock_closure
1639 int cl_lock_closure_build(const struct lu_env
*env
, struct cl_lock
*lock
,
1640 struct cl_lock_closure
*closure
)
1642 const struct cl_lock_slice
*slice
;
1646 LINVRNT(cl_lock_is_mutexed(closure
->clc_origin
));
1647 LINVRNT(cl_lock_invariant(env
, closure
->clc_origin
));
1649 result
= cl_lock_enclosure(env
, lock
, closure
);
1651 list_for_each_entry(slice
, &lock
->cll_layers
, cls_linkage
) {
1652 if (slice
->cls_ops
->clo_closure
!= NULL
) {
1653 result
= slice
->cls_ops
->clo_closure(env
, slice
,
1661 cl_lock_disclosure(env
, closure
);
1664 EXPORT_SYMBOL(cl_lock_closure_build
);
1667 * Adds new lock to a closure.
1669 * Try-locks \a lock and if succeeded, adds it to the closure (never more than
1670 * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
1671 * until next try-lock is likely to succeed.
1673 int cl_lock_enclosure(const struct lu_env
*env
, struct cl_lock
*lock
,
1674 struct cl_lock_closure
*closure
)
1678 cl_lock_trace(D_DLMTRACE
, env
, "enclosure lock", lock
);
1679 if (!cl_lock_mutex_try(env
, lock
)) {
1681 * If lock->cll_inclosure is not empty, lock is already in
1684 if (list_empty(&lock
->cll_inclosure
)) {
1685 cl_lock_get_trust(lock
);
1686 lu_ref_add(&lock
->cll_reference
, "closure", closure
);
1687 list_add(&lock
->cll_inclosure
, &closure
->clc_list
);
1690 cl_lock_mutex_put(env
, lock
);
1693 cl_lock_disclosure(env
, closure
);
1694 if (closure
->clc_wait
) {
1695 cl_lock_get_trust(lock
);
1696 lu_ref_add(&lock
->cll_reference
, "closure-w", closure
);
1697 cl_lock_mutex_put(env
, closure
->clc_origin
);
1699 LASSERT(cl_lock_nr_mutexed(env
) == 0);
1700 cl_lock_mutex_get(env
, lock
);
1701 cl_lock_mutex_put(env
, lock
);
1703 cl_lock_mutex_get(env
, closure
->clc_origin
);
1704 lu_ref_del(&lock
->cll_reference
, "closure-w", closure
);
1705 cl_lock_put(env
, lock
);
1707 result
= CLO_REPEAT
;
1711 EXPORT_SYMBOL(cl_lock_enclosure
);
1713 /** Releases mutices of enclosed locks. */
1714 void cl_lock_disclosure(const struct lu_env
*env
,
1715 struct cl_lock_closure
*closure
)
1717 struct cl_lock
*scan
;
1718 struct cl_lock
*temp
;
1720 cl_lock_trace(D_DLMTRACE
, env
, "disclosure lock", closure
->clc_origin
);
1721 list_for_each_entry_safe(scan
, temp
, &closure
->clc_list
,
1723 list_del_init(&scan
->cll_inclosure
);
1724 cl_lock_mutex_put(env
, scan
);
1725 lu_ref_del(&scan
->cll_reference
, "closure", closure
);
1726 cl_lock_put(env
, scan
);
1729 LASSERT(closure
->clc_nr
== 0);
1731 EXPORT_SYMBOL(cl_lock_disclosure
);
1733 /** Finalizes a closure. */
1734 void cl_lock_closure_fini(struct cl_lock_closure
*closure
)
1736 LASSERT(closure
->clc_nr
== 0);
1737 LASSERT(list_empty(&closure
->clc_list
));
1739 EXPORT_SYMBOL(cl_lock_closure_fini
);
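
/*
 * Closure usage sketch (illustrative): a caller that needs to mutex a group
 * of related locks builds a closure around an origin lock whose mutex it
 * already holds, and releases everything afterwards:
 *
 *      struct cl_lock_closure closure;
 *
 *      cl_lock_closure_init(env, &closure, origin, wait);
 *      rc = cl_lock_closure_build(env, lock, &closure);
 *      ... on success, all enclosed locks are mutexed ...
 *      cl_lock_disclosure(env, &closure);
 *      cl_lock_closure_fini(&closure);
 */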
 * Destroys this lock. Notifies layers (bottom-to-top) that the lock is being
 * destroyed, then destroys the lock. If there are holds on the lock,
 * destruction is postponed until all holds are released. This is called when
 * a decision is made to destroy the lock in the future, e.g., when a blocking
 * AST is received on it, or a fatal communication error happens.
1748 * Caller must have a reference on this lock to prevent a situation, when
1749 * deleted lock lingers in memory for indefinite time, because nobody calls
1750 * cl_lock_put() to finish it.
1752 * \pre atomic_read(&lock->cll_ref) > 0
1753 * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
1754 * cl_lock_nr_mutexed(env) == 1)
1755 * [i.e., if a top-lock is deleted, mutices of no other locks can be
1756 * held, as deletion of sub-locks might require releasing a top-lock
1759 * \see cl_lock_operations::clo_delete()
1760 * \see cl_lock::cll_holds
1762 void cl_lock_delete(const struct lu_env
*env
, struct cl_lock
*lock
)
1764 LINVRNT(cl_lock_is_mutexed(lock
));
1765 LINVRNT(cl_lock_invariant(env
, lock
));
1766 LASSERT(ergo(cl_lock_nesting(lock
) == CNL_TOP
,
1767 cl_lock_nr_mutexed(env
) == 1));
1770 cl_lock_trace(D_DLMTRACE
, env
, "delete lock", lock
);
1771 if (lock
->cll_holds
== 0)
1772 cl_lock_delete0(env
, lock
);
1774 lock
->cll_flags
|= CLF_DOOMED
;
1777 EXPORT_SYMBOL(cl_lock_delete
);
1780 * Mark lock as irrecoverably failed, and mark it for destruction. This
1781 * happens when, e.g., server fails to grant a lock to us, or networking
1784 * \pre atomic_read(&lock->cll_ref) > 0
1786 * \see clo_lock_delete()
1787 * \see cl_lock::cll_holds
1789 void cl_lock_error(const struct lu_env
*env
, struct cl_lock
*lock
, int error
)
1791 LINVRNT(cl_lock_is_mutexed(lock
));
1792 LINVRNT(cl_lock_invariant(env
, lock
));
1795 if (lock
->cll_error
== 0 && error
!= 0) {
1796 cl_lock_trace(D_DLMTRACE
, env
, "set lock error", lock
);
1797 lock
->cll_error
= error
;
1798 cl_lock_signal(env
, lock
);
1799 cl_lock_cancel(env
, lock
);
1800 cl_lock_delete(env
, lock
);
1804 EXPORT_SYMBOL(cl_lock_error
);
 * Cancels this lock. Notifies layers (bottom-to-top) that the lock is being
 * cancelled, then destroys the lock. If there are holds on the lock,
 * cancellation is postponed until all holds are released.
1812 * Cancellation notification is delivered to layers at most once.
1814 * \see cl_lock_operations::clo_cancel()
1815 * \see cl_lock::cll_holds
1817 void cl_lock_cancel(const struct lu_env
*env
, struct cl_lock
*lock
)
1819 LINVRNT(cl_lock_is_mutexed(lock
));
1820 LINVRNT(cl_lock_invariant(env
, lock
));
1823 cl_lock_trace(D_DLMTRACE
, env
, "cancel lock", lock
);
1824 if (lock
->cll_holds
== 0)
1825 cl_lock_cancel0(env
, lock
);
1827 lock
->cll_flags
|= CLF_CANCELPEND
;
1830 EXPORT_SYMBOL(cl_lock_cancel
);
1833 * Finds an existing lock covering given index and optionally different from a
1834 * given \a except lock.
1836 struct cl_lock
*cl_lock_at_pgoff(const struct lu_env
*env
,
1837 struct cl_object
*obj
, pgoff_t index
,
1838 struct cl_lock
*except
,
1839 int pending
, int canceld
)
1841 struct cl_object_header
*head
;
1842 struct cl_lock
*scan
;
1843 struct cl_lock
*lock
;
1844 struct cl_lock_descr
*need
;
1848 head
= cl_object_header(obj
);
1849 need
= &cl_env_info(env
)->clt_descr
;
1852 need
->cld_mode
= CLM_READ
; /* CLM_READ matches both READ & WRITE, but
1854 need
->cld_start
= need
->cld_end
= index
;
1855 need
->cld_enq_flags
= 0;
1857 spin_lock(&head
->coh_lock_guard
);
1858 /* It is fine to match any group lock since there could be only one
1859 * with a uniq gid and it conflicts with all other lock modes too */
1860 list_for_each_entry(scan
, &head
->coh_locks
, cll_linkage
) {
1861 if (scan
!= except
&&
1862 (scan
->cll_descr
.cld_mode
== CLM_GROUP
||
1863 cl_lock_ext_match(&scan
->cll_descr
, need
)) &&
1864 scan
->cll_state
>= CLS_HELD
&&
1865 scan
->cll_state
< CLS_FREEING
&&
1867 * This check is racy as the lock can be canceled right
1868 * after it is done, but this is fine, because page exists
1871 (canceld
|| !(scan
->cll_flags
& CLF_CANCELLED
)) &&
1872 (pending
|| !(scan
->cll_flags
& CLF_CANCELPEND
))) {
1873 /* Don't increase cs_hit here since this
1874 * is just a helper function. */
1875 cl_lock_get_trust(scan
);
1880 spin_unlock(&head
->coh_lock_guard
);
1883 EXPORT_SYMBOL(cl_lock_at_pgoff
);
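
/*
 * Usage sketch (illustrative): check whether some granted lock still covers
 * the page at index "idx" of object "obj", skipping cancelled locks:
 *
 *      lock = cl_lock_at_pgoff(env, obj, idx, NULL, 1, 0);
 *      if (lock != NULL) {
 *              ... the page is still protected ...
 *              cl_lock_put(env, lock);
 *      }
 */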
/**
 * Calculate the page offset at the layer of @lock.
 * At the time of this writing, @page is top page and @lock is sub lock.
 */
static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
{
        struct lu_device_type *dtype;
        const struct cl_page_slice *slice;

        dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
        slice = cl_page_at(page, dtype);
        LASSERT(slice != NULL);
        return slice->cpl_page->cp_index;
}
1901 * Check if page @page is covered by an extra lock or discard it.
1903 static int check_and_discard_cb(const struct lu_env
*env
, struct cl_io
*io
,
1904 struct cl_page
*page
, void *cbdata
)
1906 struct cl_thread_info
*info
= cl_env_info(env
);
1907 struct cl_lock
*lock
= cbdata
;
1908 pgoff_t index
= pgoff_at_lock(page
, lock
);
1910 if (index
>= info
->clt_fn_index
) {
1911 struct cl_lock
*tmp
;
1913 /* refresh non-overlapped index */
1914 tmp
= cl_lock_at_pgoff(env
, lock
->cll_descr
.cld_obj
, index
,
1917 /* Cache the first-non-overlapped index so as to skip
1918 * all pages within [index, clt_fn_index). This
1919 * is safe because if tmp lock is canceled, it will
1920 * discard these pages. */
1921 info
->clt_fn_index
= tmp
->cll_descr
.cld_end
+ 1;
1922 if (tmp
->cll_descr
.cld_end
== CL_PAGE_EOF
)
1923 info
->clt_fn_index
= CL_PAGE_EOF
;
1924 cl_lock_put(env
, tmp
);
1925 } else if (cl_page_own(env
, io
, page
) == 0) {
1926 /* discard the page */
1927 cl_page_unmap(env
, io
, page
);
1928 cl_page_discard(env
, io
, page
);
1929 cl_page_disown(env
, io
, page
);
1931 LASSERT(page
->cp_state
== CPS_FREEING
);
1935 info
->clt_next_index
= index
+ 1;
1936 return CLP_GANG_OKAY
;
1939 static int discard_cb(const struct lu_env
*env
, struct cl_io
*io
,
1940 struct cl_page
*page
, void *cbdata
)
1942 struct cl_thread_info
*info
= cl_env_info(env
);
1943 struct cl_lock
*lock
= cbdata
;
1945 LASSERT(lock
->cll_descr
.cld_mode
>= CLM_WRITE
);
1946 KLASSERT(ergo(page
->cp_type
== CPT_CACHEABLE
,
1947 !PageWriteback(cl_page_vmpage(env
, page
))));
1948 KLASSERT(ergo(page
->cp_type
== CPT_CACHEABLE
,
1949 !PageDirty(cl_page_vmpage(env
, page
))));
1951 info
->clt_next_index
= pgoff_at_lock(page
, lock
) + 1;
1952 if (cl_page_own(env
, io
, page
) == 0) {
1953 /* discard the page */
1954 cl_page_unmap(env
, io
, page
);
1955 cl_page_discard(env
, io
, page
);
1956 cl_page_disown(env
, io
, page
);
1958 LASSERT(page
->cp_state
== CPS_FREEING
);
1961 return CLP_GANG_OKAY
;
1965 * Discard pages protected by the given lock. This function traverses radix
1966 * tree to find all covering pages and discard them. If a page is being covered
1967 * by other locks, it should remain in cache.
1969 * If error happens on any step, the process continues anyway (the reasoning
1970 * behind this being that lock cancellation cannot be delayed indefinitely).
1972 int cl_lock_discard_pages(const struct lu_env
*env
, struct cl_lock
*lock
)
1974 struct cl_thread_info
*info
= cl_env_info(env
);
1975 struct cl_io
*io
= &info
->clt_io
;
1976 struct cl_lock_descr
*descr
= &lock
->cll_descr
;
1977 cl_page_gang_cb_t cb
;
1981 LINVRNT(cl_lock_invariant(env
, lock
));
1984 io
->ci_obj
= cl_object_top(descr
->cld_obj
);
1985 io
->ci_ignore_layout
= 1;
1986 result
= cl_io_init(env
, io
, CIT_MISC
, io
->ci_obj
);
1990 cb
= descr
->cld_mode
== CLM_READ
? check_and_discard_cb
: discard_cb
;
1991 info
->clt_fn_index
= info
->clt_next_index
= descr
->cld_start
;
1993 res
= cl_page_gang_lookup(env
, descr
->cld_obj
, io
,
1994 info
->clt_next_index
, descr
->cld_end
,
1996 if (info
->clt_next_index
> descr
->cld_end
)
1999 if (res
== CLP_GANG_RESCHED
)
2001 } while (res
!= CLP_GANG_OKAY
);
2003 cl_io_fini(env
, io
);
2006 EXPORT_SYMBOL(cl_lock_discard_pages
);
2009 * Eliminate all locks for a given object.
2011 * Caller has to guarantee that no lock is in active use.
 * \param cancel when this is set, cl_locks_prune() cancels locks before
 *               destroying them.
2016 void cl_locks_prune(const struct lu_env
*env
, struct cl_object
*obj
, int cancel
)
2018 struct cl_object_header
*head
;
2019 struct cl_lock
*lock
;
2022 head
= cl_object_header(obj
);
2024 * If locks are destroyed without cancellation, all pages must be
2025 * already destroyed (as otherwise they will be left unprotected).
2027 LASSERT(ergo(!cancel
,
2028 head
->coh_tree
.rnode
== NULL
&& head
->coh_pages
== 0));
2030 spin_lock(&head
->coh_lock_guard
);
2031 while (!list_empty(&head
->coh_locks
)) {
2032 lock
= container_of(head
->coh_locks
.next
,
2033 struct cl_lock
, cll_linkage
);
2034 cl_lock_get_trust(lock
);
2035 spin_unlock(&head
->coh_lock_guard
);
2036 lu_ref_add(&lock
->cll_reference
, "prune", current
);
2039 cl_lock_mutex_get(env
, lock
);
2040 if (lock
->cll_state
< CLS_FREEING
) {
2041 LASSERT(lock
->cll_users
<= 1);
2042 if (unlikely(lock
->cll_users
== 1)) {
2043 struct l_wait_info lwi
= { 0 };
2045 cl_lock_mutex_put(env
, lock
);
2046 l_wait_event(lock
->cll_wq
,
2047 lock
->cll_users
== 0,
2053 cl_lock_cancel(env
, lock
);
2054 cl_lock_delete(env
, lock
);
2056 cl_lock_mutex_put(env
, lock
);
2057 lu_ref_del(&lock
->cll_reference
, "prune", current
);
2058 cl_lock_put(env
, lock
);
2059 spin_lock(&head
->coh_lock_guard
);
2061 spin_unlock(&head
->coh_lock_guard
);
2064 EXPORT_SYMBOL(cl_locks_prune
);
2066 static struct cl_lock
*cl_lock_hold_mutex(const struct lu_env
*env
,
2067 const struct cl_io
*io
,
2068 const struct cl_lock_descr
*need
,
2069 const char *scope
, const void *source
)
2071 struct cl_lock
*lock
;
2076 lock
= cl_lock_find(env
, io
, need
);
2079 cl_lock_mutex_get(env
, lock
);
2080 if (lock
->cll_state
< CLS_FREEING
&&
2081 !(lock
->cll_flags
& CLF_CANCELLED
)) {
2082 cl_lock_hold_mod(env
, lock
, +1);
2083 lu_ref_add(&lock
->cll_holders
, scope
, source
);
2084 lu_ref_add(&lock
->cll_reference
, scope
, source
);
2087 cl_lock_mutex_put(env
, lock
);
2088 cl_lock_put(env
, lock
);
2094 * Returns a lock matching \a need description with a reference and a hold on
2097 * This is much like cl_lock_find(), except that cl_lock_hold() additionally
2098 * guarantees that lock is not in the CLS_FREEING state on return.
2100 struct cl_lock
*cl_lock_hold(const struct lu_env
*env
, const struct cl_io
*io
,
2101 const struct cl_lock_descr
*need
,
2102 const char *scope
, const void *source
)
2104 struct cl_lock
*lock
;
2108 lock
= cl_lock_hold_mutex(env
, io
, need
, scope
, source
);
2110 cl_lock_mutex_put(env
, lock
);
2113 EXPORT_SYMBOL(cl_lock_hold
);
2116 * Main high-level entry point of cl_lock interface that finds existing or
2117 * enqueues new lock matching given description.
2119 struct cl_lock
*cl_lock_request(const struct lu_env
*env
, struct cl_io
*io
,
2120 const struct cl_lock_descr
*need
,
2121 const char *scope
, const void *source
)
2123 struct cl_lock
*lock
;
2125 __u32 enqflags
= need
->cld_enq_flags
;
2129 lock
= cl_lock_hold_mutex(env
, io
, need
, scope
, source
);
2133 rc
= cl_enqueue_locked(env
, lock
, io
, enqflags
);
2135 if (cl_lock_fits_into(env
, lock
, need
, io
)) {
2136 if (!(enqflags
& CEF_AGL
)) {
2137 cl_lock_mutex_put(env
, lock
);
2138 cl_lock_lockdep_acquire(env
, lock
,
2144 cl_unuse_locked(env
, lock
);
2146 cl_lock_trace(D_DLMTRACE
, env
,
2147 rc
<= 0 ? "enqueue failed" : "agl succeed", lock
);
2148 cl_lock_hold_release(env
, lock
, scope
, source
);
2149 cl_lock_mutex_put(env
, lock
);
2150 lu_ref_del(&lock
->cll_reference
, scope
, source
);
2151 cl_lock_put(env
, lock
);
2153 LASSERT(enqflags
& CEF_AGL
);
2155 } else if (rc
!= 0) {
2161 EXPORT_SYMBOL(cl_lock_request
);
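
/*
 * Usage sketch (illustrative; "my-scope" is an arbitrary tag): a lock
 * obtained with cl_lock_request() is dropped with cl_unuse() and
 * cl_lock_release() under the same scope/source pair used to request it:
 *
 *      lock = cl_lock_request(env, io, need, "my-scope", current);
 *      if (!IS_ERR(lock)) {
 *              ... the extent described by "need" is now protected ...
 *              cl_unuse(env, lock);
 *              cl_lock_release(env, lock, "my-scope", current);
 *      }
 */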
2164 * Adds a hold to a known lock.
2166 void cl_lock_hold_add(const struct lu_env
*env
, struct cl_lock
*lock
,
2167 const char *scope
, const void *source
)
2169 LINVRNT(cl_lock_is_mutexed(lock
));
2170 LINVRNT(cl_lock_invariant(env
, lock
));
2171 LASSERT(lock
->cll_state
!= CLS_FREEING
);
2174 cl_lock_hold_mod(env
, lock
, +1);
2176 lu_ref_add(&lock
->cll_holders
, scope
, source
);
2177 lu_ref_add(&lock
->cll_reference
, scope
, source
);
2180 EXPORT_SYMBOL(cl_lock_hold_add
);
2183 * Releases a hold and a reference on a lock, on which caller acquired a
2186 void cl_lock_unhold(const struct lu_env
*env
, struct cl_lock
*lock
,
2187 const char *scope
, const void *source
)
2189 LINVRNT(cl_lock_invariant(env
, lock
));
2191 cl_lock_hold_release(env
, lock
, scope
, source
);
2192 lu_ref_del(&lock
->cll_reference
, scope
, source
);
2193 cl_lock_put(env
, lock
);
2196 EXPORT_SYMBOL(cl_lock_unhold
);
2199 * Releases a hold and a reference on a lock, obtained by cl_lock_hold().
2201 void cl_lock_release(const struct lu_env
*env
, struct cl_lock
*lock
,
2202 const char *scope
, const void *source
)
2204 LINVRNT(cl_lock_invariant(env
, lock
));
2206 cl_lock_trace(D_DLMTRACE
, env
, "release lock", lock
);
2207 cl_lock_mutex_get(env
, lock
);
2208 cl_lock_hold_release(env
, lock
, scope
, source
);
2209 cl_lock_mutex_put(env
, lock
);
2210 lu_ref_del(&lock
->cll_reference
, scope
, source
);
2211 cl_lock_put(env
, lock
);
2214 EXPORT_SYMBOL(cl_lock_release
);
2216 void cl_lock_user_add(const struct lu_env
*env
, struct cl_lock
*lock
)
2218 LINVRNT(cl_lock_is_mutexed(lock
));
2219 LINVRNT(cl_lock_invariant(env
, lock
));
2222 cl_lock_used_mod(env
, lock
, +1);
2225 EXPORT_SYMBOL(cl_lock_user_add
);
2227 void cl_lock_user_del(const struct lu_env
*env
, struct cl_lock
*lock
)
2229 LINVRNT(cl_lock_is_mutexed(lock
));
2230 LINVRNT(cl_lock_invariant(env
, lock
));
2231 LASSERT(lock
->cll_users
> 0);
2234 cl_lock_used_mod(env
, lock
, -1);
2235 if (lock
->cll_users
== 0)
2236 wake_up_all(&lock
->cll_wq
);
2239 EXPORT_SYMBOL(cl_lock_user_del
);
const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
        static const char *names[] = {
                [CLM_PHANTOM] = "P",
                [CLM_READ]    = "R",
                [CLM_WRITE]   = "W",
                [CLM_GROUP]   = "G"
        };
        if (0 <= mode && mode < ARRAY_SIZE(names))
                return names[mode];
        else
                return "..";
}
EXPORT_SYMBOL(cl_lock_mode_name);
2257 * Prints human readable representation of a lock description.
2259 void cl_lock_descr_print(const struct lu_env
*env
, void *cookie
,
2260 lu_printer_t printer
,
2261 const struct cl_lock_descr
*descr
)
2263 const struct lu_fid
*fid
;
2265 fid
= lu_object_fid(&descr
->cld_obj
->co_lu
);
2266 (*printer
)(env
, cookie
, DDESCR
"@"DFID
, PDESCR(descr
), PFID(fid
));
2268 EXPORT_SYMBOL(cl_lock_descr_print
);
2271 * Prints human readable representation of \a lock to the \a f.
2273 void cl_lock_print(const struct lu_env
*env
, void *cookie
,
2274 lu_printer_t printer
, const struct cl_lock
*lock
)
2276 const struct cl_lock_slice
*slice
;
2277 (*printer
)(env
, cookie
, "lock@%p[%d %d %d %d %d %08lx] ",
2278 lock
, atomic_read(&lock
->cll_ref
),
2279 lock
->cll_state
, lock
->cll_error
, lock
->cll_holds
,
2280 lock
->cll_users
, lock
->cll_flags
);
2281 cl_lock_descr_print(env
, cookie
, printer
, &lock
->cll_descr
);
2282 (*printer
)(env
, cookie
, " {\n");
2284 list_for_each_entry(slice
, &lock
->cll_layers
, cls_linkage
) {
2285 (*printer
)(env
, cookie
, " %s@%p: ",
2286 slice
->cls_obj
->co_lu
.lo_dev
->ld_type
->ldt_name
,
2288 if (slice
->cls_ops
->clo_print
!= NULL
)
2289 slice
->cls_ops
->clo_print(env
, cookie
, printer
, slice
);
2290 (*printer
)(env
, cookie
, "\n");
2292 (*printer
)(env
, cookie
, "} lock@%p\n", lock
);
2294 EXPORT_SYMBOL(cl_lock_print
);
int cl_lock_init(void)
{
        return lu_kmem_init(cl_lock_caches);
}

void cl_lock_fini(void)
{
        lu_kmem_fini(cl_lock_caches);
}