/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
/*  Must be kept in sync with the beginning of struct gfs2_glock  */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct gfs2_glock *gl);
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
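/*
 * Illustrative sketch (not part of the original file): how the checks above
 * combine for a few caller scenarios, using only the states and flags defined
 * for this file.
 *
 *	relaxed_state_ok(LM_ST_SHARED, LM_ST_SHARED, 0);             -- 1, exact match
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0);          -- 1, EX covers SH
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, GL_EXACT);   -- 0, caller wants SH exactly
 *	relaxed_state_ok(LM_ST_DEFERRED, LM_ST_SHARED, LM_FLAG_ANY); -- 1, any held state will do
 */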
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}
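/*
 * A minimal sketch of the lookup path implied above (illustrative, assuming
 * the sd_gl_hash/GFS2_GL_HASH_MASK definitions from incore.h): hashing the
 * same name twice must land in the same bucket, which is what makes
 * search_bucket() below work.
 *
 *	struct lm_lockname name = { .ln_number = ino,
 *				    .ln_type = LM_TYPE_INODE };
 *	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(&name)];
 */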
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}
/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                BUG_ON(spin_is_locked(&gl->gl_spin));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
 out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
        return rv;
}
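/*
 * Reference-count usage sketch (illustrative, not from the original file):
 * every gfs2_glock_hold() must be balanced by a gfs2_glock_put(); the put
 * that drops the last reference unhashes and frees the glock via kill_glock().
 *
 *	gfs2_glock_hold(gl);
 *	... use gl without holding the bucket lock ...
 *	if (gfs2_glock_put(gl))
 *		... last reference dropped, gl is gone ...
 */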
/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;
        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);
        return empty;
}
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}
/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                          struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;
        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops ||
            glops == &gfs2_meta_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(&bucket->hb_lock);
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

 fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);

 fail:
        kmem_cache_free(gfs2_glock_cachep, gl);

        return error;
}
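/*
 * Usage sketch (illustrative): looking up or creating the inode glock for
 * block "no_addr".  CREATE is the flag callers in this tree pass for the
 * create argument; on success the caller owns one reference and must
 * gfs2_glock_put() it.
 *
 *	struct gfs2_glock *gl;
 *	int error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops,
 *				   CREATE, &gl);
 *	if (!error) {
 *		... use gl ...
 *		gfs2_glock_put(gl);
 *	}
 */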
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}
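/*
 * Holder lifecycle sketch (illustrative, not from the original file): a
 * holder is usually a stack variable; init takes a glock reference and
 * uninit drops it, so the two calls must always be paired.
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);	-- takes a glock ref
 *	...
 *	gfs2_holder_uninit(&gh);			-- drops the ref
 */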
/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: __GFP_NOFAIL
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
                                           unsigned int state,
                                           int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);

        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}
/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}
/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state,
                                           gh->gh_flags);

                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                         return 1;
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        complete(&gh->gh_wait);

        return 0;
}
/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}
/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
                complete(&gh.gh_wait);
        }
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        else {
                gl->gl_owner = current;
                gl->gl_ip = (unsigned long)__builtin_return_address(0);
        }
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        gl->gl_owner = NULL;
        gl->gl_ip = 0;
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}
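/*
 * Glmutex usage sketch (illustrative): the pattern used by the reclaim and
 * scan paths later in this file.  trylock avoids sleeping; the blocking
 * variant queues a HIF_MUTEX holder on gl_waiters1 and sleeps until
 * rq_mutex() completes it.
 *
 *	if (gfs2_glmutex_trylock(gl)) {
 *		... gl state and holder queues are stable here ...
 *		gfs2_glmutex_unlock(gl);
 *	}
 */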
/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

 restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
                                         GFP_KERNEL | __GFP_NOFAIL);
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

 out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}
void gfs2_glock_inode_squish(struct inode *inode)
{
        struct gfs2_holder gh;
        struct gfs2_glock *gl = GFS2_I(inode)->i_gl;
        gfs2_holder_init(gl, LM_ST_UNLOCKED, 0, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);
        spin_lock(&gl->gl_spin);
        gfs2_assert(inode->i_sb->s_fs_info, list_empty(&gl->gl_holders));
        list_add_tail(&gh.gh_list, &gl->gl_waiters2);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
        gfs2_holder_uninit(&gh);
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /*  Deal with each possible exit condition  */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}
/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}
/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}
/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}
/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}
/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        BUG_ON(!gh->gh_owner);

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
                print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
                BUG();
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

 restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        if (error == GLR_TRYFAILED && (gh->gh_flags & GL_DUMP))
                dump_glock(gl);

        return error;
}
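/*
 * Enqueue usage sketch (illustrative): a blocking acquisition through this
 * function.  gfs2_glock_nq_init() in glock.h wraps gfs2_holder_init() plus
 * this call.
 *
 *	struct gfs2_holder gh;
 *
 *	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... the lock is held ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */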
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}
/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}
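/*
 * Async usage sketch (illustrative): GL_ASYNC callers overlap lock
 * acquisition with other work, then poll and/or wait for the final status.
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
 *	gfs2_glock_nq(&gh);		-- returns without blocking
 *	... do other work ...
 *	if (gfs2_glock_poll(&gh))
 *		error = gfs2_glock_wait(&gh);	-- collects the result
 */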
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}
/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
                                int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}
static void greedy_work(void *data)
{
        struct greedy *gr = data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}
/**
 * gfs2_glock_be_greedy -
 * @gl: the glock
 * @time: the time to hold on greedily, in jiffies
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time || gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, 0, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}
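/*
 * Pairing sketch (illustrative): gfs2_glock_nq_init() (in glock.h) and
 * gfs2_glock_dq_uninit() bracket the common "one-shot" hold.
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (!error) {
 *		...
 *		gfs2_glock_dq_uninit(&gh);
 *	}
 */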
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
                      struct gfs2_glock_operations *glops, unsigned int state,
                      int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B
 *         -1 if A < B
 *          0 if A == B
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
                         (gh_b->gh_flags & GL_LOCAL_EXCL))
                        ret = 1;
        }

        return ret;
}
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}
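/*
 * Deadlock-avoidance sketch (illustrative): because p[] is sorted with
 * glock_compare(), every caller acquires its holders in the same global
 * order.  Two tasks that both need locks 5 and 9 both queue 5 first, so
 * neither can hold 9 while waiting on 5.
 *
 *	struct gfs2_holder *p[2] = { &ghs[0], &ghs[1] };
 *	sort(p, 2, sizeof(struct gfs2_holder *), glock_compare, NULL);
 */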
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);

        return error;
}
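/*
 * Multi-lock usage sketch (illustrative): acquiring several glocks at once,
 * e.g. for a rename-style operation.  On success all holders are queued; on
 * failure none are.
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error)
 *		gfs2_glock_dq_m(2, ghs);
 */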
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}
/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
                             struct gfs2_glock_operations *glops,
                             unsigned int state, int flags)
{
        struct gfs2_glock *gl;
        int error;

        if (atomic_read(&sdp->sd_reclaim_count) <
            gfs2_tune_get(sdp, gt_reclaim_limit)) {
                error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
                if (!error) {
                        gfs2_glock_prefetch(gl, state, flags);
                        gfs2_glock_put(gl);
                }
        }
}
/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}
/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}
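/*
 * LVB usage sketch (illustrative): holds must be balanced with unholds; the
 * value block lives in gl->gl_lvb between the two calls, and writing it is
 * only meaningful while the glock is held exclusively.
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (!error) {
 *		... read, or when holding EX write, gl->gl_lvb ...
 *		gfs2_lvb_unhold(gl);
 *	}
 */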
void gfs2_lvb_sync(struct gfs2_glock *gl)
{
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
        if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
                gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

        gfs2_glmutex_unlock(gl);
}
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, state);
        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}
/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = data;
                struct gfs2_glock *gl;

                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);
                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}
/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 *
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
        if (state != LM_ST_UNLOCKED)
                return;
        /* FIXME: remove this? */
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
                demote = time_after_eq(jiffies,
                                       gl->gl_stamp +
                                       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          struct gfs2_gl_hash_bucket *bucket)
{
        struct glock_plug plug;
        struct list_head *tmp;
        struct gfs2_glock *gl;
        int entries;

        /* Add "plug" to end of bucket list, work back up list from there */
        memset(&plug.gl_flags, 0, sizeof(unsigned long));
        set_bit(GLF_PLUG, &plug.gl_flags);

        write_lock(&bucket->hb_lock);
        list_add(&plug.gl_list, &bucket->hb_list);
        write_unlock(&bucket->hb_lock);

        for (;;) {
                write_lock(&bucket->hb_lock);

                for (;;) {
                        tmp = plug.gl_list.next;

                        if (tmp == &bucket->hb_list) {
                                list_del(&plug.gl_list);
                                entries = !list_empty(&bucket->hb_list);
                                write_unlock(&bucket->hb_lock);
                                return entries;
                        }
                        gl = list_entry(tmp, struct gfs2_glock, gl_list);

                        /* Move plug up list */
                        list_move(&plug.gl_list, &gl->gl_list);

                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        /* examiner() must glock_put() */
                        gfs2_glock_hold(gl);

                        break;
                }

                write_unlock(&bucket->hb_lock);

                examiner(gl);
        }
}
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = gl->gl_object;
                        if (ip && !atomic_read(&ip->i_count))
                                goto out_schedule;
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        goto out_schedule;

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);

        return;

 out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
        gfs2_glock_put(gl);
}
/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
                cond_resched();
        }
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                spin_unlock(&sdp->sd_reclaim_lock);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        } else
                spin_unlock(&sdp->sd_reclaim_lock);

        if (gfs2_glmutex_trylock(gl)) {
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED);

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;

                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                        if (examine_bucket(clear_glock, sdp,
                                           &sdp->sd_gl_hash[x]))
                                cont = 1;

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                /* invalidate_inodes() requires that the sb inodes list
                   not change, but an async completion callback for an
                   unlock can occur which does glock_put() which
                   can call iput() which will change the sb inodes list.
                   invalidate_inodes_mutex prevents glock_put()'s during
                   an invalidate_inodes()  */

                mutex_lock(&sdp->sd_invalidate_inodes_mutex);
                invalidate_inodes(sdp->sd_vfs);
                mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
                msleep(10);
        }
}
/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  %s\n", str);
        printk(KERN_INFO "    owner = %ld\n",
               (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
        printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
        printk(KERN_INFO "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "    error = %d\n", gh->gh_error);
        printk(KERN_INFO "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        printk(" %u", x);
        printk(" \n");
        print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

        error = 0;

        return error;
}
/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  Inode:\n");
        printk(KERN_INFO "    num = %llu %llu\n",
               (unsigned long long)ip->i_num.no_formal_ino,
               (unsigned long long)ip->i_num.no_addr);
        printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
        printk(KERN_INFO "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        printk(" %u", x);
        printk(" \n");

        error = 0;

        return error;
}
/**
 * dump_glock - print information about a glock
 * @gl: the glock
 * @count: where we are in the buffer
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;

        spin_lock(&gl->gl_spin);

        printk(KERN_INFO "Glock (%u, %llu)\n", gl->gl_name.ln_type,
               (unsigned long long)gl->gl_name.ln_number);
        printk(KERN_INFO "  gl_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gl->gl_flags))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
        printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
        printk(KERN_INFO "  gl_owner = %s\n", gl->gl_owner->comm);
        print_symbol(KERN_INFO "  gl_ip = %s\n", gl->gl_ip);
        printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        printk(KERN_INFO "  le = %s\n",
               (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        printk(KERN_INFO "  reclaim = %s\n",
               (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                printk(KERN_INFO "  aspace = %lu\n",
                       gl->gl_aspace->i_mapping->nrpages);
        else
                printk(KERN_INFO "  aspace = no\n");
        printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder("Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder("Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder("Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                error = dump_holder("Waiter2", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder("Waiter3", gh);
                if (error)
                        goto out;
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gl->gl_object);
                        if (error)
                                goto out;
                } else
                        printk(KERN_INFO "  Inode: busy\n");
        }

        error = 0;

 out:
        spin_unlock(&gl->gl_spin);

        return error;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 * @ub: the buffer to copy the information into
 *
 * If @ub is NULL, dump the lockstate to the console.
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_gl_hash_bucket *bucket;
        struct gfs2_glock *gl;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                bucket = &sdp->sd_gl_hash[x];

                read_lock(&bucket->hb_lock);

                list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        error = dump_glock(gl);
                        if (error)
                                break;
                }

                read_unlock(&bucket->hb_lock);

                if (error)
                        break;
        }

        return error;
}