/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

/*  Must be kept in sync with the beginning of struct gfs2_glock  */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}

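/* Illustrative note (not part of the original file): an EXCLUSIVE holder
   satisfies a SHARED request, so relaxed_state_ok(LM_ST_EXCLUSIVE,
   LM_ST_SHARED, 0) returns 1, while the reverse direction returns 0 unless
   the caller passed LM_FLAG_ANY, which accepts any state other than
   LM_ST_UNLOCKED. */
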
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

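/* Illustrative note (not part of the original file): the second jhash()
   call is seeded with the hash of the lock number, so both fields of the
   lm_lockname contribute to bucket selection; masking with
   GFS2_GL_HASH_MASK assumes the hash table size is a power of two. */
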
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
 out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);

        return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;

        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);

        return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                   struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;
        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops ||
            glops == &gfs2_meta_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(&bucket->hb_lock);
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

 fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);

 fail:
        kmem_cache_free(gfs2_glock_cachep, gl);

        return error;
}

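/* Typical lookup/creation pattern (illustrative sketch, not part of the
 * original file; error handling abbreviated):
 *
 *      struct gfs2_glock *gl;
 *      int error = gfs2_glock_get(sdp, number, &gfs2_inode_glops,
 *                                 CREATE, &gl);
 *      if (!error) {
 *              ...use the glock...
 *              gfs2_glock_put(gl);  // drop the reference taken above
 *      }
 */
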
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_owner = (flags & GL_NEVER_RECURSE) ? NULL : current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */

void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: __GFP_NOFAIL
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
                                    int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);

        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 */

void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

/**
 * handle_recurse - put other holder structures (marked recursive)
 *                  into the holders list
 * @gh: the holder structure
 */

static void handle_recurse(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *tmp_gh, *safe;
        int found = 0;

        if (gfs2_assert_warn(sdp, gh->gh_owner))
                return;

        list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
                if (tmp_gh->gh_owner != gh->gh_owner)
                        continue;

                gfs2_assert_warn(sdp,
                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

                list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
                tmp_gh->gh_error = 0;
                set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);

                complete(&tmp_gh->gh_wait);

                found = 1;
        }

        gfs2_assert_warn(sdp, found);
}

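/* Note (added for clarity, not part of the original file): "recursive"
   holders are multiple gfs2_holder structures queued by the same task for
   the same glock.  Once one of them is granted, handle_recurse() promotes
   every other waiter owned by that task in a single pass, so a task never
   blocks waiting on a lock it already holds. */
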
/**
 * do_unrecurse - a recursive holder was just dropped off the waiters3 list
 * @gh: the holder
 *
 * If there is only one other recursive holder, clear its HIF_RECURSE bit.
 * If there is more than one, leave them alone.
 */

static void do_unrecurse(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *tmp_gh, *last_gh = NULL;
        int found = 0;

        if (gfs2_assert_warn(sdp, gh->gh_owner))
                return;

        list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
                if (tmp_gh->gh_owner != gh->gh_owner)
                        continue;

                gfs2_assert_warn(sdp,
                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

                /* found more than one */
                if (found)
                        return;

                found = 1;
                last_gh = tmp_gh;
        }

        if (!gfs2_assert_warn(sdp, found))
                clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int recurse;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state,
                                           gh->gh_flags);

                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
                recurse = 0;
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        if (recurse)
                handle_recurse(gh);

        complete(&gh->gh_wait);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

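/* Note (added for clarity, not part of the original file): run_queue()
   services gl_waiters1 (glmutex requests) first, then gl_waiters2
   (demote/greedy requests, unless GLF_SKIP_WAITERS2 is set), and finally
   gl_waiters3 (promotions); a blocked request at one level stops
   processing of the levels below it. */
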
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else
                complete(&gh.gh_wait);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

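/* Typical critical-section pattern (illustrative sketch, not part of the
 * original file):
 *
 *      gfs2_glmutex_lock(gl);
 *      ...manipulate glock state that gl_spin alone can't protect...
 *      gfs2_glmutex_unlock(gl);
 */
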
/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

 restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state,
                                         LM_FLAG_TRY | GL_NEVER_RECURSE,
                                         GFP_KERNEL | __GFP_NOFAIL);
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

 out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /*  Deal with each possible exit condition  */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                                do_unrecurse(gh);
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                if (test_and_clear_bit(HIF_RECURSE,
                                                       &gh->gh_iflags))
                                        do_unrecurse(gh);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        handle_recurse(gh);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * recurse_check - check that a recursive request is allowed
 *
 * Make sure the new holder is compatible with the pre-existing one.
 */

static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
                         unsigned int state)
{
        struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;

        if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
                                  !(existing->gh_flags & LM_FLAG_ANY)))
                goto fail;

        if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
                                  !(new->gh_flags & GL_LOCAL_EXCL)))
                goto fail;

        if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
                                                   new->gh_flags)))
                goto fail;

        return 0;

 fail:
        set_bit(HIF_ABORTED, &new->gh_iflags);

        return -EINVAL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        if (!gh->gh_owner)
                goto out;

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                if (recurse_check(existing, gh, gl->gl_state))
                        return;

                list_add_tail(&gh->gh_list, &gl->gl_holders);
                set_bit(HIF_HOLDER, &gh->gh_iflags);

                gh->gh_error = 0;
                complete(&gh->gh_wait);

                return;
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                if (recurse_check(existing, gh, existing->gh_state))
                        return;

                set_bit(HIF_RECURSE, &gh->gh_iflags);
                set_bit(HIF_RECURSE, &existing->gh_iflags);

                list_add_tail(&gh->gh_list, &gl->gl_waiters3);

                return;
        }

 out:
        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

 restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        return error;
}

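/* Asynchronous acquisition pattern (illustrative sketch, not part of the
 * original file; builds only on gfs2_glock_nq/poll/wait defined here):
 *
 *      gh->gh_flags |= GL_ASYNC;
 *      gfs2_glock_nq(gh);
 *      while (!gfs2_glock_poll(gh))
 *              ...do other work...
 *      error = gfs2_glock_wait(gh);  // collect the final result
 */
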
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 */

void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}

/**
 * gfs2_glock_force_drop - Force a glock to be uncached
 * @gl: the glock
 */

void gfs2_glock_force_drop(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, LM_ST_UNLOCKED, GL_NEVER_RECURSE, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        list_add_tail(&gh.gh_list, &gl->gl_waiters2);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

static void greedy_work(void *data)
{
        struct greedy *gr = (struct greedy *)data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}

/**
 * gfs2_glock_be_greedy - hang on to a glock for a while
 * @gl: the glock
 * @time: the time to be greedy for, in jiffies
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time ||
            gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, GL_NEVER_RECURSE, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}

/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

int gfs2_glock_nq_init(struct gfs2_glock *gl, unsigned int state, int flags,
                       struct gfs2_holder *gh)
{
        int error;

        gfs2_holder_init(gl, state, flags, gh);

        error = gfs2_glock_nq(gh);
        if (error)
                gfs2_holder_uninit(gh);

        return error;
}

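/* Typical hold/release pairing (illustrative sketch, not part of the
 * original file):
 *
 *      struct gfs2_holder gh;
 *      int error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *      if (!error) {
 *              ...read the object the glock protects...
 *              gfs2_glock_dq_uninit(&gh);
 *      }
 */
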
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
                      struct gfs2_glock_operations *glops, unsigned int state,
                      int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B, -1 if A < B, 0 if A == B
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
                         (gh_b->gh_flags & GL_LOCAL_EXCL))
                        ret = 1;
        }

        return ret;
}

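/* Note (added for clarity, not part of the original file): sorting holders
   into a single global order by lock number (breaking ties so weaker
   requests sort after stronger ones) means every caller acquires multiple
   glocks in the same sequence, which is what makes nq_m_sync() below
   deadlock free. */
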
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
                             struct gfs2_glock_operations *glops,
                             unsigned int state, int flags)
{
        struct gfs2_glock *gl;
        int error;

        if (atomic_read(&sdp->sd_reclaim_count) <
            gfs2_tune_get(sdp, gt_reclaim_limit)) {
                error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
                if (!error) {
                        gfs2_glock_prefetch(gl, state, flags);
                        gfs2_glock_put(gl);
                }
        }
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 * Returns: errno
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}

void gfs2_lvb_sync(struct gfs2_glock *gl)
{
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
        if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
                gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

        gfs2_glmutex_unlock(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, state);
        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = (struct lm_async_cb *)data;
                struct gfs2_glock *gl;

                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);

                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
 * @sdp: the filesystem
 * @inum: the inode number
 */

void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
{
        struct gfs2_glock *gl;
        struct gfs2_inode *ip;
        int error;

        error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
                               NO_CREATE, &gl);
        if (error || !gl)
                return;

        if (!gfs2_glmutex_trylock(gl))
                goto out;

        ip = get_gl2ip(gl);
        if (!ip)
                goto out_unlock;

        if (atomic_read(&ip->i_count))
                goto out_unlock;

        gfs2_inode_destroy(ip);

 out_unlock:
        gfs2_glmutex_unlock(gl);

 out:
        gfs2_glock_put(gl);
}

/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
        struct gfs2_glock *i_gl;

        if (state != LM_ST_UNLOCKED)
                return;

        spin_lock(&io_gl->gl_spin);
        i_gl = get_gl2gl(io_gl);
        if (i_gl) {
                gfs2_glock_hold(i_gl);
                spin_unlock(&io_gl->gl_spin);
        } else {
                spin_unlock(&io_gl->gl_spin);
                return;
        }

        if (gfs2_glmutex_trylock(i_gl)) {
                struct gfs2_inode *ip = get_gl2ip(i_gl);
                if (ip) {
                        gfs2_try_toss_vnode(ip);
                        gfs2_glmutex_unlock(i_gl);
                        gfs2_glock_schedule_for_reclaim(i_gl);
                        goto out;
                }
                gfs2_glmutex_unlock(i_gl);
        }

 out:
        gfs2_glock_put(i_gl);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
                demote = time_after_eq(jiffies,
                                       gl->gl_stamp +
                                       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = get_gl2ip(gl);
                        if (ip && !atomic_read(&ip->i_count))
                                gfs2_inode_destroy(ip);
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          struct gfs2_gl_hash_bucket *bucket)
{
        struct glock_plug plug;
        struct list_head *tmp;
        struct gfs2_glock *gl;
        int entries;

        /* Add "plug" to end of bucket list, work back up list from there */
        memset(&plug.gl_flags, 0, sizeof(unsigned long));
        set_bit(GLF_PLUG, &plug.gl_flags);

        write_lock(&bucket->hb_lock);
        list_add(&plug.gl_list, &bucket->hb_list);
        write_unlock(&bucket->hb_lock);

        for (;;) {
                write_lock(&bucket->hb_lock);

                for (;;) {
                        tmp = plug.gl_list.next;

                        if (tmp == &bucket->hb_list) {
                                list_del(&plug.gl_list);
                                entries = !list_empty(&bucket->hb_list);
                                write_unlock(&bucket->hb_lock);
                                return entries;
                        }
                        gl = list_entry(tmp, struct gfs2_glock, gl_list);

                        /* Move plug up list */
                        list_move(&plug.gl_list, &gl->gl_list);

                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        /* examiner() must glock_put() */
                        gfs2_glock_hold(gl);

                        break;
                }

                write_unlock(&bucket->hb_lock);

                examiner(gl);
        }
}

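/* Note (added for clarity, not part of the original file): the "plug" is a
   dummy bucket entry marked GLF_PLUG.  It records how far the walk has
   progressed so the bucket lock can be dropped while examiner() runs and
   the scan can resume at the plug afterwards; search_bucket() and
   gfs2_dump_lockstate() skip plug entries for the same reason. */
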
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = get_gl2ip(gl);
                        if (ip && !atomic_read(&ip->i_count))
                                goto out_schedule;
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        goto out_schedule;

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);

        return;

 out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
        gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
                cond_resched();
        }
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = get_gl2ip(gl);
                        if (ip && !atomic_read(&ip->i_count))
                                gfs2_inode_destroy(ip);
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED);

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;

                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                        if (examine_bucket(clear_glock, sdp,
                                           &sdp->sd_gl_hash[x]))
                                cont = 1;

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                /* invalidate_inodes() requires that the sb inodes list
                   not change, but an async completion callback for an
                   unlock can occur which does glock_put() which
                   can call iput() which will change the sb inodes list.
                   invalidate_inodes_mutex prevents glock_put()'s during
                   an invalidate_inodes() */

                mutex_lock(&sdp->sd_invalidate_inodes_mutex);
                invalidate_inodes(sdp->sd_vfs);
                mutex_unlock(&sdp->sd_invalidate_inodes_mutex);

                yield();
        }
}

/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  %s\n", str);
        printk(KERN_INFO "    owner = %ld\n",
               (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
        printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
        printk(KERN_INFO "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        printk(" %u", x);
        printk("\n");
        printk(KERN_INFO "    error = %d\n", gh->gh_error);
        printk(KERN_INFO "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        printk(" %u", x);
        printk("\n");

        error = 0;

        return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  Inode:\n");
        printk(KERN_INFO "    num = %llu %llu\n",
               ip->i_num.no_formal_ino, ip->i_num.no_addr);
        printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
        printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count));
        printk(KERN_INFO "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        printk(" %u", x);
        printk("\n");
        printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");

        error = 0;

        return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;

        spin_lock(&gl->gl_spin);

        printk(KERN_INFO "Glock (%u, %llu)\n",
               gl->gl_name.ln_type,
               gl->gl_name.ln_number);
        printk(KERN_INFO "  gl_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gl->gl_flags))
                        printk(" %u", x);
        printk("\n");
        printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
        printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
        printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        printk(KERN_INFO "  le = %s\n",
               (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        printk(KERN_INFO "  reclaim = %s\n",
               (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                printk(KERN_INFO "  aspace = %lu\n",
                       gl->gl_aspace->i_mapping->nrpages);
        else
                printk(KERN_INFO "  aspace = no\n");
        printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder("Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder("Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder("Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                error = dump_holder("Waiter2", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder("Waiter3", gh);
                if (error)
                        goto out;
        }
        if (gl->gl_ops == &gfs2_inode_glops && get_gl2ip(gl)) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(get_gl2ip(gl));
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        printk(KERN_INFO "  Inode: busy\n");
                }
        }

        error = 0;

 out:
        spin_unlock(&gl->gl_spin);

        return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the lock state to the console.
 *
 * Returns: 0 on success, or an error from dump_glock()
 */

int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_gl_hash_bucket *bucket;
        struct gfs2_glock *gl;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                bucket = &sdp->sd_gl_hash[x];

                read_lock(&bucket->hb_lock);

                list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        error = dump_glock(gl);
                        if (error)
                                break;
                }

                read_unlock(&bucket->hb_lock);

                if (error)
                        break;
        }

        return error;
}
);