/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "lm_interface.h"
/*  Must be kept in sync with the beginning of struct gfs2_glock  */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
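
/* Example of the relaxations above: a holder requesting LM_ST_SHARED is
   satisfied by a glock already held in LM_ST_EXCLUSIVE (an exclusive lock
   covers shared access), and a holder passing LM_FLAG_ANY is satisfied by
   any state other than LM_ST_UNLOCKED.  GL_EXACT disables both. */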
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(uint64_t), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}
/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
	int rv = 0;

	mutex_lock(&sdp->sd_invalidate_inodes_mutex);

	write_lock(&bucket->hb_lock);
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl->gl_list);
		write_unlock(&bucket->hb_lock);
		BUG_ON(spin_is_locked(&gl->gl_spin));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(&bucket->hb_lock);
 out:
	mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
	return rv;
}
/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;

	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);

	return empty;
}
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
					struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &bucket->hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}
/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
				   struct lm_lockname *name)
{
	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
	struct gfs2_glock *gl;

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, name);
	read_unlock(&bucket->hb_lock);

	return gl;
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
		   struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name;
	struct gfs2_glock *gl, *tmp;
	struct gfs2_gl_hash_bucket *bucket;
	int error;

	name.ln_number = number;
	name.ln_type = glops->go_type;
	bucket = &sdp->sd_gl_hash[gl_hash(&name)];

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, &name);
	read_unlock(&bucket->hb_lock);

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(gl, 0, sizeof(struct gfs2_glock));

	INIT_LIST_HEAD(&gl->gl_list);
	gl->gl_name = name;
	kref_init(&gl->gl_ref);

	spin_lock_init(&gl->gl_spin);

	gl->gl_state = LM_ST_UNLOCKED;
	INIT_LIST_HEAD(&gl->gl_holders);
	INIT_LIST_HEAD(&gl->gl_waiters1);
	INIT_LIST_HEAD(&gl->gl_waiters2);
	INIT_LIST_HEAD(&gl->gl_waiters3);

	gl->gl_ops = glops;
	gl->gl_bucket = bucket;
	INIT_LIST_HEAD(&gl->gl_reclaim);

	gl->gl_sbd = sdp;

	lops_init_le(&gl->gl_le, &gfs2_glock_lops);
	INIT_LIST_HEAD(&gl->gl_ail_list);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops ||
	    glops == &gfs2_rgrp_glops ||
	    glops == &gfs2_meta_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(&bucket->hb_lock);
	tmp = search_bucket(bucket, &name);
	if (tmp) {
		write_unlock(&bucket->hb_lock);
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &bucket->hb_list);
		write_unlock(&bucket->hb_lock);
	}

	*glp = gl;

	return 0;

 fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);

 fail:
	kmem_cache_free(gfs2_glock_cachep, gl);

	return error;
}
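
/* Typical usage (illustrative sketch only; "blkno" is a hypothetical
   variable, not from this file): a caller wanting the glock structure
   for an inode block would do

	struct gfs2_glock *gl;
	int error = gfs2_glock_get(sdp, blkno, &gfs2_inode_glops, CREATE, &gl);

   and drop the reference with gfs2_glock_put(gl) when done. */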
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	flags |= GL_NEVER_RECURSE;
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}
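
/* A gfs2_holder represents one request to hold a glock in a given state.
   The usual lifecycle (sketch): gfs2_holder_init() takes a glock reference,
   gfs2_glock_nq() queues/acquires, gfs2_glock_dq() releases, and
   gfs2_holder_uninit() drops the glock reference again. */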
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags | GL_NEVER_RECURSE;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}
/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: __GFP_NOFAIL
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
				    int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);

	return gh;
}
/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 */

void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}
/**
 * handle_recurse - put other holder structures (marked recursive)
 *                  into the holders list
 * @gh: the holder structure
 */

static void handle_recurse(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *tmp_gh, *safe;
	int found = 0;

	BUG_ON(!spin_is_locked(&gl->gl_spin));

	printk(KERN_INFO "recursion %016llx, %u\n", gl->gl_name.ln_number,
	       gl->gl_name.ln_type);

	if (gfs2_assert_warn(sdp, gh->gh_owner))
		return;

	list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
		if (tmp_gh->gh_owner != gh->gh_owner)
			continue;

		gfs2_assert_warn(sdp,
				 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

		list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
		tmp_gh->gh_error = 0;
		set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);

		complete(&tmp_gh->gh_wait);

		found = 1;
	}

	gfs2_assert_warn(sdp, found);
}
/**
 * do_unrecurse - a recursive holder was just dropped off the waiters3 list
 * @gh: the holder
 *
 * If there is only one other recursive holder, clear its HIF_RECURSE bit.
 * If there is more than one, leave them alone.
 */

static void do_unrecurse(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_holder *tmp_gh, *last_gh = NULL;
	int found = 0;

	BUG_ON(!spin_is_locked(&gl->gl_spin));

	if (gfs2_assert_warn(sdp, gh->gh_owner))
		return;

	list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
		if (tmp_gh->gh_owner != gh->gh_owner)
			continue;

		gfs2_assert_warn(sdp,
				 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

		if (found)
			return;

		found = 1;
		last_gh = tmp_gh;
	}

	if (!gfs2_assert_warn(sdp, found))
		clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
}
/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}
/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int recurse;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state,
					   gh->gh_flags);

			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
		recurse = 0;
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	if (recurse)
		handle_recurse(gh);

	complete(&gh->gh_wait);

	return 0;
}
/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}
/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
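
/* Note on ordering: run_queue() drains the three waiter lists in strict
   priority order -- gl_waiters1 (glmutex requests) first, then gl_waiters2
   (demote/greedy requests), then gl_waiters3 (promote requests) -- and
   stops as soon as a request blocks.  This is why a pending demote is
   always serviced before new holders are granted. */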
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	else
		complete(&gh.gh_wait);
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}
/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		acquired = 0;
	spin_unlock(&gl->gl_spin);

	return acquired;
}
/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}
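
/* The glmutex is the per-glock "big lock": the usual pattern (sketch) is

	gfs2_glmutex_lock(gl);
	... manipulate gl, gl_object, cached pages ...
	gfs2_glmutex_unlock(gl);

   with gfs2_glmutex_trylock() used on reclaim/scan paths that must not
   block behind a busy glock. */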
/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

 restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state,
					 LM_FLAG_TRY | GL_NEVER_RECURSE,
					 GFP_KERNEL | __GFP_NOFAIL);
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

 out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/*  Deal with each possible exit condition  */

	if (!gh)
		gl->gl_stamp = jiffies;

	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		if (test_bit(HIF_RECURSE, &gh->gh_iflags))
			do_unrecurse(gh);
		spin_unlock(&gl->gl_spin);

	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED)
			gh->gh_error = 0;
		else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		if (test_bit(HIF_RECURSE, &gh->gh_iflags))
			do_unrecurse(gh);
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		if (test_bit(HIF_RECURSE, &gh->gh_iflags))
			do_unrecurse(gh);
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}
/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
			       lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}
/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}
/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}
/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh &&
		    !(gl->gl_req_gh &&
		      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}
/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			if (test_bit(HIF_RECURSE, &gh->gh_iflags))
				do_unrecurse(gh);
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
						   gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				if (test_and_clear_bit(HIF_RECURSE,
						       &gh->gh_iflags))
					do_unrecurse(gh);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		if (test_bit(HIF_RECURSE, &gh->gh_iflags))
			handle_recurse(gh);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}
/*
 * Make sure the new holder is compatible with the pre-existing one.
 */

static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
			 unsigned int state)
{
	struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;

	if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
			     !(existing->gh_flags & LM_FLAG_ANY)))
		goto fail;

	if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
			     !(new->gh_flags & GL_LOCAL_EXCL)))
		goto fail;

	if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
						   new->gh_flags)))
		goto fail;

	return 0;

 fail:
	print_symbol(KERN_WARNING "GFS2: Existing holder from %s\n",
		     existing->gh_ip);
	print_symbol(KERN_WARNING "GFS2: New holder from %s\n", new->gh_ip);
	set_bit(HIF_ABORTED, &new->gh_iflags);
	return -EINVAL;
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		if (recurse_check(existing, gh, gl->gl_state))
			return;

		list_add_tail(&gh->gh_list, &gl->gl_holders);
		set_bit(HIF_HOLDER, &gh->gh_iflags);

		gh->gh_error = 0;
		complete(&gh->gh_wait);

		return;
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		if (recurse_check(existing, gh, existing->gh_state))
			return;

		set_bit(HIF_RECURSE, &gh->gh_iflags);
		set_bit(HIF_RECURSE, &existing->gh_iflags);

		list_add_tail(&gh->gh_list, &gl->gl_waiters3);

		return;
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
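
/* "Recursive" locking here means the same task queueing a second holder
   for a glock it already holds or is already waiting on.  add_to_queue()
   detects this by owner, checks compatibility with recurse_check(), and
   either grants the new holder immediately or marks both holders
   HIF_RECURSE so handle_recurse() can grant them together later. */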
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

 restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	return error;
}
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}
/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_SYNC)
		set_bit(GLF_SYNC, &gl->gl_flags);

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		if (test_bit(GLF_SYNC, &gl->gl_flags)) {
			if (glops->go_sync)
				glops->go_sync(gl, DIO_METADATA | DIO_DATA);
		}

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}
/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 */

void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) ||
	    !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) ||
	    !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}
/**
 * gfs2_glock_force_drop - Force a glock to be uncached
 * @gl: the glock
 */

void gfs2_glock_force_drop(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, LM_ST_UNLOCKED, GL_NEVER_RECURSE, &gh);
	set_bit(HIF_DEMOTE, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	list_add_tail(&gh.gh_list, &gl->gl_waiters2);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}
static void greedy_work(void *data)
{
	struct greedy *gr = (struct greedy *)data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}
/**
 * gfs2_glock_be_greedy -
 * @gl: the glock
 * @time: the time (in jiffies) to hold on to the glock greedily
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time ||
	    gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, GL_NEVER_RECURSE, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
		      struct gfs2_glock_operations *glops, unsigned int state,
		      int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if A > B, -1 if A < B, 0 if A == B
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
			 (gh_b->gh_flags & GL_LOCAL_EXCL))
			ret = 1;
	}

	return ret;
}
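
/* Sorting holders by lock number (and, for equal numbers, by how
   restrictive the request is) gives every task the same global acquisition
   order, which is what makes nq_m_sync() deadlock free: two tasks locking
   the same pair of glocks always lock them in the same sequence. */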
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 * @p: scratch array of holder pointers used for sorting
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}
/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
			     struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}
/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 * Returns: errno
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}
/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}
void gfs2_lvb_sync(struct gfs2_glock *gl)
{
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
	if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
		gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

	gfs2_glmutex_unlock(gl);
}
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (gfs2_assert_warn(sdp, gl))
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}
/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = (struct lm_async_cb *)data;
		struct gfs2_glock *gl;

		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);

		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}
/**
 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
 * @sdp: the filesystem
 * @inum: the inode number
 */

void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
{
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;

	error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
			       NO_CREATE, &gl);
	if (error || !gl)
		return;

	if (!gfs2_glmutex_trylock(gl))
		goto out;

	ip = gl->gl_object;
	if (!ip)
		goto out_unlock;

	if (atomic_read(&ip->i_count))
		goto out_unlock;

	gfs2_inode_destroy(ip);

 out_unlock:
	gfs2_glmutex_unlock(gl);

 out:
	gfs2_glock_put(gl);
}
/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
	struct gfs2_glock *i_gl;

	if (state != LM_ST_UNLOCKED)
		return;

	spin_lock(&io_gl->gl_spin);
	i_gl = io_gl->gl_object;
	if (i_gl) {
		gfs2_glock_hold(i_gl);
		spin_unlock(&io_gl->gl_spin);
	} else {
		spin_unlock(&io_gl->gl_spin);
		return;
	}

	if (gfs2_glmutex_trylock(i_gl)) {
		struct gfs2_inode *ip = i_gl->gl_object;
		if (ip) {
			gfs2_try_toss_vnode(ip);
			gfs2_glmutex_unlock(i_gl);
			gfs2_glock_schedule_for_reclaim(i_gl);
			goto out;
		}
		gfs2_glmutex_unlock(i_gl);
	}

 out:
	gfs2_glock_put(i_gl);
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies,
				       gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}
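
/* Demotion policy in one line: sticky glocks are never demoted, prefetched
   glocks are demoted only after they have sat unused for gt_prefetch_secs,
   and everything else defers to the per-type go_demote_ok() hook. */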
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  struct gfs2_gl_hash_bucket *bucket)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(&bucket->hb_lock);
	list_add(&plug.gl_list, &bucket->hb_list);
	write_unlock(&bucket->hb_lock);

	for (;;) {
		write_lock(&bucket->hb_lock);

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &bucket->hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&bucket->hb_list);
				write_unlock(&bucket->hb_lock);
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(&bucket->hb_lock);

		examiner(gl);
	}
}
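
/* The "plug" is a dummy list entry (flagged GLF_PLUG so lookups skip it)
   that marks how far the walk has progressed.  Because the bucket lock is
   dropped while examiner() runs, the plug is what lets the walk resume at
   the right place even if the list changed in the meantime. */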
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				goto out_schedule;
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);

	return;

 out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}
/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
		cond_resched();
	}
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else
		spin_unlock(&sdp->sd_reclaim_lock);

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;

		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp,
					   &sdp->sd_gl_hash[x]))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		/* invalidate_inodes() requires that the sb inodes list
		   not change, but an async completion callback for an
		   unlock can occur which does glock_put() which
		   can call iput() which will change the sb inodes list.
		   invalidate_inodes_mutex prevents glock_put()'s during
		   an invalidate_inodes() */

		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
		invalidate_inodes(sdp->sd_vfs);
		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
	}
}
/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  %s\n", str);
	printk(KERN_INFO "    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    error = %d\n", gh->gh_error);
	printk(KERN_INFO "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

	error = 0;

	return error;
}
/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  Inode:\n");
	printk(KERN_INFO "    num = %llu %llu\n",
	       ip->i_num.no_formal_ino, ip->i_num.no_addr);
	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
	printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count));
	printk(KERN_INFO "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");

	error = 0;

	return error;
}
/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock (%u, %llu)\n",
	       gl->gl_name.ln_type,
	       gl->gl_name.ln_number);
	printk(KERN_INFO "  gl_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
	printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO "  le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO "  reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO "  aspace = %lu\n",
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO "  aspace = no\n");
	printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO "  Inode: busy\n");
		}
	}

	error = 0;

 out:
	spin_unlock(&gl->gl_spin);

	return error;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * The lock state is dumped to the console.
 */

int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_gl_hash_bucket *bucket;
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		bucket = &sdp->sd_gl_hash[x];

		read_lock(&bucket->hb_lock);

		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(&bucket->hb_lock);

		if (error)
			break;
	}

	return error;
}