/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "lm_interface.h"

/*  Must be kept in sync with the beginning of struct gfs2_glock  */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

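/* The two jhash() calls above chain together: the hash of the lock number
   seeds the hash of the lock type, so locks that share a number but differ
   in type still spread across buckets.  The final mask presumes the hash
   table size is a power of two. */
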
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
 out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);

        return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;

        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);

        return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                   struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;
        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops ||
            glops == &gfs2_meta_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

        write_lock(&bucket->hb_lock);
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

 fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);

 fail:
        kmem_cache_free(gfs2_glock_cachep, gl);

        return error;
}

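/* Note the find-or-create pattern above: the bucket is searched under the
   read lock first; if nothing is found, the new glock is built with no lock
   held, and the bucket is searched again under the write lock before the
   insert.  If another CPU created the same lock name in the window, the
   freshly built glock is freed and the existing one is returned instead. */
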
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, int flags,
                      struct gfs2_holder *gh)
{
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = (flags & GL_NEVER_RECURSE) ? NULL : current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, int flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: __GFP_NOFAIL
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
                                    int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);

        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

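/* A sketch of the typical caller-side lifecycle of a stack-allocated holder
   (error handling omitted; the state and flags below are examples only):

        struct gfs2_holder gh;

        gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);     // acquire the glock
        ...                             // use the protected object
        gfs2_glock_dq(&gh);             // release the glock
        gfs2_holder_uninit(&gh);        // drop the glock reference

   gfs2_holder_get()/gfs2_holder_put() wrap the same init/uninit pair for
   holders that must live on the heap, such as the HIF_DEALLOC demote
   requests queued by handle_callback() below. */
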
/**
 * handle_recurse - put other holder structures (marked recursive)
 *                  into the holders list
 * @gh: the holder structure
 *
 */

static void handle_recurse(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *tmp_gh, *safe;
        int found = 0;

        if (gfs2_assert_warn(sdp, gh->gh_owner))
                return;

        list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
                if (tmp_gh->gh_owner != gh->gh_owner)
                        continue;

                gfs2_assert_warn(sdp,
                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

                list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
                tmp_gh->gh_error = 0;
                set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);

                complete(&tmp_gh->gh_wait);

                found = 1;
        }

        gfs2_assert_warn(sdp, found);
}

/**
 * do_unrecurse - a recursive holder was just dropped off the waiters3 list
 * @gh: the holder
 *
 * If there is only one other recursive holder, clear its HIF_RECURSE bit.
 * If there is more than one, leave them alone.
 *
 */

static void do_unrecurse(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *tmp_gh, *last_gh = NULL;
        int found = 0;

        if (gfs2_assert_warn(sdp, gh->gh_owner))
                return;

        list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
                if (tmp_gh->gh_owner != gh->gh_owner)
                        continue;

                gfs2_assert_warn(sdp,
                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

                if (found)
                        return;

                found = 1;
                last_gh = tmp_gh;
        }

        if (!gfs2_assert_warn(sdp, found))
                clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int recurse;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state,
                                           gh->gh_flags);

                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
                recurse = 0;
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        if (recurse)
                handle_recurse(gh);

        complete(&gh->gh_wait);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */

static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

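/* run_queue() services the three waiter lists in strict priority order:
   gl_waiters1 (glmutex requests) first, then gl_waiters2 (demote and greedy
   requests, unless GLF_SKIP_WAITERS2 is set), then gl_waiters3 (promote
   requests).  The loop stops as soon as a request blocks, so a queued
   demote is handled before any later promote. */
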
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else
                complete(&gh.gh_wait);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

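/* The "glmutex" is not a struct mutex: it is the GLF_LOCK bit plus the
   gl_waiters1 queue.  A contending caller parks a temporary holder marked
   HIF_MUTEX on gl_waiters1 and sleeps on its completion; whoever clears
   GLF_LOCK runs run_queue(), which completes the next waiter in turn. */
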
/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

 restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state,
                                         LM_FLAG_TRY | GL_NEVER_RECURSE,
                                         GFP_KERNEL | __GFP_NOFAIL);
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

 out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /*  Deal with each possible exit condition  */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                                do_unrecurse(gh);
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                if (test_and_clear_bit(HIF_RECURSE,
                                                       &gh->gh_iflags))
                                        do_unrecurse(gh);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        handle_recurse(gh);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * recurse_check -
 *
 * Make sure the new holder is compatible with the pre-existing one.
 *
 */

static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
                         unsigned int state)
{
        struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;

        if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
                                  !(existing->gh_flags & LM_FLAG_ANY)))
                goto fail;

        if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
                                  !(new->gh_flags & GL_LOCAL_EXCL)))
                goto fail;

        if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
                                                   new->gh_flags)))
                goto fail;

        return 0;

 fail:
        print_symbol(KERN_WARNING "GFS2: Existing holder from %s\n",
                     existing->gh_ip);
        print_symbol(KERN_WARNING "GFS2: New holder from %s\n", new->gh_ip);
        set_bit(HIF_ABORTED, &new->gh_iflags);
        return -EINVAL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        if (!gh->gh_owner)
                goto out;

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                if (recurse_check(existing, gh, gl->gl_state))
                        return;

                list_add_tail(&gh->gh_list, &gl->gl_holders);
                set_bit(HIF_HOLDER, &gh->gh_iflags);

                gh->gh_error = 0;
                complete(&gh->gh_wait);

                return;
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                if (recurse_check(existing, gh, existing->gh_state))
                        return;

                set_bit(HIF_RECURSE, &gh->gh_iflags);
                set_bit(HIF_RECURSE, &existing->gh_iflags);

                list_add_tail(&gh->gh_list, &gl->gl_waiters3);

                return;
        }

 out:
        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

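/* add_to_queue() is where "recursive" locking is detected: if the calling
   task already holds, or is already queued for, this glock, recurse_check()
   verifies that the two holders are compatible, and HIF_RECURSE marks both
   so that rq_promote()/handle_recurse() grant them together. */
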
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

 restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}

/**
 * gfs2_glock_force_drop - Force a glock to be uncached
 * @gl: the glock
 *
 */

void gfs2_glock_force_drop(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, LM_ST_UNLOCKED, GL_NEVER_RECURSE, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        list_add_tail(&gh.gh_list, &gl->gl_waiters2);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

static void greedy_work(void *data)
{
        struct greedy *gr = (struct greedy *)data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}

/**
 * gfs2_glock_be_greedy -
 * @gl:
 * @time:
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time ||
            gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, GL_NEVER_RECURSE, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
                      struct gfs2_glock_operations *glops, unsigned int state,
                      int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
                         (gh_b->gh_flags & GL_LOCAL_EXCL))
                        ret = 1;
        }

        return ret;
}

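/* Sorting an array of holders with glock_compare() gives every caller the
   same global acquisition order (by lock number, with shared-before-exclusive
   as the tie-break), which is what makes nq_m_sync() below deadlock free. */
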
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

        e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        for (x = 0; x < num_gh; x++) {
                ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
                error = gfs2_glock_nq(&ghs[x]);
                if (error) {
                        borked = 1;
                        serious = error;
                        num_gh = x;
                        break;
                }
        }

        for (x = 0; x < num_gh; x++) {
                error = e[x] = glock_wait_internal(&ghs[x]);
                if (error) {
                        borked = 1;
                        if (error != GLR_TRYFAILED && error != GLR_CANCELED)
                                serious = error;
                }
        }

        if (!borked) {
                kfree(e);
                return 0;
        }

        for (x = 0; x < num_gh; x++)
                if (!e[x])
                        gfs2_glock_dq(&ghs[x]);

        if (serious)
                error = serious;
        else {
                for (x = 0; x < num_gh; x++)
                        gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
                                           &ghs[x]);
                error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
        }

        kfree(e);

        return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        unsigned int x;

        for (x = 0; x < num_gh; x++)
                gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 *
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
                             struct gfs2_glock_operations *glops,
                             unsigned int state, int flags)
{
        struct gfs2_glock *gl;
        int error;

        if (atomic_read(&sdp->sd_reclaim_count) <
            gfs2_tune_get(sdp, gt_reclaim_limit)) {
                error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
                if (!error) {
                        gfs2_glock_prefetch(gl, state, flags);
                        gfs2_glock_put(gl);
                }
        }
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
        int error;

        gfs2_glmutex_lock(gl);

        if (!atomic_read(&gl->gl_lvb_count)) {
                error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
                if (error) {
                        gfs2_glmutex_unlock(gl);
                        return error;
                }
                gfs2_glock_hold(gl);
        }
        atomic_inc(&gl->gl_lvb_count);

        gfs2_glmutex_unlock(gl);

        return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
        gfs2_glock_hold(gl);
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
        if (atomic_dec_and_test(&gl->gl_lvb_count)) {
                gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
                gl->gl_lvb = NULL;
                gfs2_glock_put(gl);
        }

        gfs2_glmutex_unlock(gl);
        gfs2_glock_put(gl);
}

void gfs2_lvb_sync(struct gfs2_glock *gl)
{
        gfs2_glmutex_lock(gl);

        gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
        if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
                gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

        gfs2_glmutex_unlock(gl);
}

static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
                        unsigned int state)
{
        struct gfs2_glock *gl;

        gl = gfs2_glock_find(sdp, name);
        if (!gl)
                return;

        if (gl->gl_ops->go_callback)
                gl->gl_ops->go_callback(gl, state);
        handle_callback(gl, state);

        spin_lock(&gl->gl_spin);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
        struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

        switch (type) {
        case LM_CB_NEED_E:
                blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_UNLOCKED);
                return;

        case LM_CB_NEED_D:
                blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_DEFERRED);
                return;

        case LM_CB_NEED_S:
                blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_SHARED);
                return;

        case LM_CB_ASYNC: {
                struct lm_async_cb *async = (struct lm_async_cb *)data;
                struct gfs2_glock *gl;

                gl = gfs2_glock_find(sdp, &async->lc_name);
                if (gfs2_assert_warn(sdp, gl))
                        return;
                if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
                        gl->gl_req_bh(gl, async->lc_ret);
                gfs2_glock_put(gl);

                return;
        }

        case LM_CB_NEED_RECOVERY:
                gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
                if (sdp->sd_recoverd_process)
                        wake_up_process(sdp->sd_recoverd_process);
                return;

        case LM_CB_DROPLOCKS:
                gfs2_gl_hash_clear(sdp, NO_WAIT);
                gfs2_quota_scan(sdp);
                return;

        default:
                gfs2_assert_warn(sdp, 0);
                return;
        }
}

/**
 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
 * @sdp: the filesystem
 * @inum: the inode number
 *
 */

void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
{
        struct gfs2_glock *gl;
        struct gfs2_inode *ip;
        int error;

        error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
                               NO_CREATE, &gl);
        if (error || !gl)
                return;

        if (!gfs2_glmutex_trylock(gl))
                goto out;

        ip = gl->gl_object;
        if (!ip)
                goto out_unlock;

        if (atomic_read(&ip->i_count))
                goto out_unlock;

        gfs2_inode_destroy(ip);

 out_unlock:
        gfs2_glmutex_unlock(gl);

 out:
        gfs2_glock_put(gl);
}

/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 *
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
        struct gfs2_glock *i_gl;

        if (state != LM_ST_UNLOCKED)
                return;

        spin_lock(&io_gl->gl_spin);
        i_gl = io_gl->gl_object;
        if (i_gl) {
                gfs2_glock_hold(i_gl);
                spin_unlock(&io_gl->gl_spin);
        } else {
                spin_unlock(&io_gl->gl_spin);
                return;
        }

        if (gfs2_glmutex_trylock(i_gl)) {
                struct gfs2_inode *ip = i_gl->gl_object;
                if (ip) {
                        gfs2_try_toss_vnode(ip);
                        gfs2_glmutex_unlock(i_gl);
                        gfs2_glock_schedule_for_reclaim(i_gl);
                        goto out;
                }
                gfs2_glmutex_unlock(i_gl);
        }

 out:
        gfs2_glock_put(i_gl);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int demote = 1;

        if (test_bit(GLF_STICKY, &gl->gl_flags))
                demote = 0;
        else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
                demote = time_after_eq(jiffies,
                                       gl->gl_stamp +
                                       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
        else if (glops->go_demote_ok)
                demote = glops->go_demote_ok(gl);

        return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&gl->gl_reclaim)) {
                gfs2_glock_hold(gl);
                list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
                atomic_inc(&sdp->sd_reclaim_count);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from the gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
        struct gfs2_glock *gl;

        spin_lock(&sdp->sd_reclaim_lock);
        if (list_empty(&sdp->sd_reclaim_list)) {
                spin_unlock(&sdp->sd_reclaim_lock);
                return;
        }
        gl = list_entry(sdp->sd_reclaim_list.next,
                        struct gfs2_glock, gl_reclaim);
        list_del_init(&gl->gl_reclaim);
        spin_unlock(&sdp->sd_reclaim_lock);

        atomic_dec(&sdp->sd_reclaim_count);
        atomic_inc(&sdp->sd_reclaimed);

        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = gl->gl_object;
                        if (ip && !atomic_read(&ip->i_count))
                                gfs2_inode_destroy(ip);
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED);
                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

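/* The reference taken by gfs2_glock_schedule_for_reclaim() when the glock
   was added to the reclaim list is dropped by the gfs2_glock_put() above,
   whether or not a demote request was actually queued. */
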
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
                          struct gfs2_gl_hash_bucket *bucket)
{
        struct glock_plug plug;
        struct list_head *tmp;
        struct gfs2_glock *gl;
        int entries;

        /* Add "plug" to end of bucket list, work back up list from there */
        memset(&plug.gl_flags, 0, sizeof(unsigned long));
        set_bit(GLF_PLUG, &plug.gl_flags);

        write_lock(&bucket->hb_lock);
        list_add(&plug.gl_list, &bucket->hb_list);
        write_unlock(&bucket->hb_lock);

        for (;;) {
                write_lock(&bucket->hb_lock);

                for (;;) {
                        tmp = plug.gl_list.next;

                        if (tmp == &bucket->hb_list) {
                                list_del(&plug.gl_list);
                                entries = !list_empty(&bucket->hb_list);
                                write_unlock(&bucket->hb_lock);
                                return entries;
                        }
                        gl = list_entry(tmp, struct gfs2_glock, gl_list);

                        /* Move plug up list */
                        list_move(&plug.gl_list, &gl->gl_list);

                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        /* examiner() must glock_put() */
                        gfs2_glock_hold(gl);

                        break;
                }

                write_unlock(&bucket->hb_lock);

                examiner(gl);
        }
}

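/* The "plug" is a dummy bucket entry (flagged GLF_PLUG so search_bucket()
   and the examiners skip it) that records the walker's position.  Because
   hb_lock is dropped while examiner() runs, the plug is first moved past
   each real glock under the write lock, so the walk can resume safely even
   if the list changes in the meantime. */
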
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = gl->gl_object;
                        if (ip && !atomic_read(&ip->i_count))
                                goto out_schedule;
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED &&
                    demote_ok(gl))
                        goto out_schedule;

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);

        return;

 out_schedule:
        gfs2_glmutex_unlock(gl);
        gfs2_glock_schedule_for_reclaim(gl);
        gfs2_glock_put(gl);
}

/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 *
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
        unsigned int x;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
                cond_resched();
        }
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int released;

        spin_lock(&sdp->sd_reclaim_lock);
        if (!list_empty(&gl->gl_reclaim)) {
                list_del_init(&gl->gl_reclaim);
                atomic_dec(&sdp->sd_reclaim_count);
                released = gfs2_glock_put(gl);
                gfs2_assert(sdp, !released);
        }
        spin_unlock(&sdp->sd_reclaim_lock);

        if (gfs2_glmutex_trylock(gl)) {
                if (gl->gl_ops == &gfs2_inode_glops) {
                        struct gfs2_inode *ip = gl->gl_object;
                        if (ip && !atomic_read(&ip->i_count))
                                gfs2_inode_destroy(ip);
                }
                if (queue_empty(gl, &gl->gl_holders) &&
                    gl->gl_state != LM_ST_UNLOCKED)
                        handle_callback(gl, LM_ST_UNLOCKED);

                gfs2_glmutex_unlock(gl);
        }

        gfs2_glock_put(gl);
}

/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
        unsigned long t;
        unsigned int x;
        int cont;

        t = jiffies;

        for (;;) {
                cont = 0;

                for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
                        if (examine_bucket(clear_glock, sdp,
                                           &sdp->sd_gl_hash[x]))
                                cont = 1;

                if (!wait || !cont)
                        break;

                if (time_after_eq(jiffies,
                                  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
                        fs_warn(sdp, "Unmount seems to be stalled. "
                                     "Dumping lock state...\n");
                        gfs2_dump_lockstate(sdp);
                        t = jiffies;
                }

                /* invalidate_inodes() requires that the sb inodes list
                   not change, but an async completion callback for an
                   unlock can occur which does glock_put() which
                   can call iput() which will change the sb inodes list.
                   invalidate_inodes_mutex prevents glock_put()'s during
                   an invalidate_inodes() */

                mutex_lock(&sdp->sd_invalidate_inodes_mutex);
                invalidate_inodes(sdp->sd_vfs);
                mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
        }
}

/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  %s\n", str);
        printk(KERN_INFO "    owner = %ld\n",
               (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
        printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
        printk(KERN_INFO "    gh_flags =");
        for (x = 0; x < 32; x++)
                if (gh->gh_flags & (1 << x))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "    error = %d\n", gh->gh_error);
        printk(KERN_INFO "    gh_iflags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gh->gh_iflags))
                        printk(" %u", x);
        printk(" \n");
        print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

        error = 0;

        return error;
}

/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
        unsigned int x;
        int error = -ENOBUFS;

        printk(KERN_INFO "  Inode:\n");
        printk(KERN_INFO "    num = %llu %llu\n",
               ip->i_num.no_formal_ino, ip->i_num.no_addr);
        printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
        printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count));
        printk(KERN_INFO "    i_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &ip->i_flags))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");

        error = 0;

        return error;
}

/**
 * dump_glock - print information about a glock
 * @gl: the glock
 * @count: where we are in the buffer
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        unsigned int x;
        int error = -ENOBUFS;

        spin_lock(&gl->gl_spin);

        printk(KERN_INFO "Glock (%u, %llu)\n",
               gl->gl_name.ln_type,
               gl->gl_name.ln_number);
        printk(KERN_INFO "  gl_flags =");
        for (x = 0; x < 32; x++)
                if (test_bit(x, &gl->gl_flags))
                        printk(" %u", x);
        printk(" \n");
        printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
        printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
        printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
        printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
        printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
        printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
        printk(KERN_INFO "  le = %s\n",
               (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
        printk(KERN_INFO "  reclaim = %s\n",
               (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
        if (gl->gl_aspace)
                printk(KERN_INFO "  aspace = %lu\n",
                       gl->gl_aspace->i_mapping->nrpages);
        else
                printk(KERN_INFO "  aspace = no\n");
        printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
        if (gl->gl_req_gh) {
                error = dump_holder("Request", gl->gl_req_gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                error = dump_holder("Holder", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
                error = dump_holder("Waiter1", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                error = dump_holder("Waiter2", gh);
                if (error)
                        goto out;
        }
        list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
                error = dump_holder("Waiter3", gh);
                if (error)
                        goto out;
        }
        if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
                if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
                    list_empty(&gl->gl_holders)) {
                        error = dump_inode(gl->gl_object);
                        if (error)
                                goto out;
                } else {
                        error = -ENOBUFS;
                        printk(KERN_INFO "  Inode: busy\n");
                }
        }

        error = 0;

 out:
        spin_unlock(&gl->gl_spin);

        return error;
}

/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 * @ub: the buffer to copy the information into
 *
 * If @ub is NULL, dump the lockstate to the console.
 *
 */

int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
        struct gfs2_gl_hash_bucket *bucket;
        struct gfs2_glock *gl;
        unsigned int x;
        int error = 0;

        for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
                bucket = &sdp->sd_gl_hash[x];

                read_lock(&bucket->hb_lock);

                list_for_each_entry(gl, &bucket->hb_list, gl_list) {
                        if (test_bit(GLF_PLUG, &gl->gl_flags))
                                continue;

                        error = dump_glock(gl);
                        if (error)
                                break;
                }

                read_unlock(&bucket->hb_lock);

                if (error)
                        break;
        }

        return error;
}