/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/uaccess.h>

#include "lm_interface.h"
/*  Must be kept in sync with the beginning of struct gfs2_glock  */
struct glock_plug {
	struct list_head gl_list;
	unsigned long gl_flags;
};

struct greedy {
	struct gfs2_holder gr_gh;
	struct work_struct gr_work;
};
typedef void (*glock_examiner) (struct gfs2_glock *gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}
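/* Illustrative outcomes of the checks above (a sketch, not exhaustive):
 *
 *	relaxed_state_ok(LM_ST_EXCLUSIVE, LM_ST_SHARED, 0);           -> 1
 *	relaxed_state_ok(LM_ST_DEFERRED, LM_ST_SHARED, LM_FLAG_ANY);  -> 1
 *	relaxed_state_ok(LM_ST_UNLOCKED, LM_ST_SHARED, LM_FLAG_ANY);  -> 0
 *
 * i.e. an exclusive hold satisfies a shared request, and LM_FLAG_ANY
 * accepts any state other than unlocked.
 */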
/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(uint64_t), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}
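/* The result indexes the superblock's bucket array, as used throughout
 * this file (sketch):
 *
 *	bucket = &sdp->sd_gl_hash[gl_hash(&name)];
 */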
/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}
/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	kref_get(&gl->gl_ref);
}
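/* Every gfs2_glock_hold() must be balanced by a gfs2_glock_put(); the
 * final put frees the glock via kill_glock() below. A typical temporary
 * reference looks like:
 *
 *	gfs2_glock_hold(gl);
 *	... use gl without the bucket lock held ...
 *	gfs2_glock_put(gl);
 */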
/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
	struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
	gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
	gfs2_assert(sdp, list_empty(&gl->gl_holders));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
	gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
	int rv = 0;

	mutex_lock(&sdp->sd_invalidate_inodes_mutex);

	write_lock(&bucket->hb_lock);
	if (kref_put(&gl->gl_ref, kill_glock)) {
		list_del_init(&gl->gl_list);
		write_unlock(&bucket->hb_lock);
		BUG_ON(spin_is_locked(&gl->gl_spin));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(&bucket->hb_lock);
out:
	mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
	return rv;
}
/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
	int empty;

	spin_lock(&gl->gl_spin);
	empty = list_empty(head);
	spin_unlock(&gl->gl_spin);

	return empty;
}
/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
					struct lm_lockname *name)
{
	struct gfs2_glock *gl;

	list_for_each_entry(gl, &bucket->hb_list, gl_list) {
		if (test_bit(GLF_PLUG, &gl->gl_flags))
			continue;
		if (!lm_name_equal(&gl->gl_name, name))
			continue;

		kref_get(&gl->gl_ref);

		return gl;
	}

	return NULL;
}
/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
					  struct lm_lockname *name)
{
	struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
	struct gfs2_glock *gl;

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, name);
	read_unlock(&bucket->hb_lock);

	return gl;
}
/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
		   struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name;
	struct gfs2_glock *gl, *tmp;
	struct gfs2_gl_hash_bucket *bucket;
	int error;

	name.ln_number = number;
	name.ln_type = glops->go_type;
	bucket = &sdp->sd_gl_hash[gl_hash(&name)];

	read_lock(&bucket->hb_lock);
	gl = search_bucket(bucket, &name);
	read_unlock(&bucket->hb_lock);

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	memset(gl, 0, sizeof(struct gfs2_glock));

	INIT_LIST_HEAD(&gl->gl_list);
	gl->gl_name = name;
	kref_init(&gl->gl_ref);

	spin_lock_init(&gl->gl_spin);

	gl->gl_state = LM_ST_UNLOCKED;
	INIT_LIST_HEAD(&gl->gl_holders);
	INIT_LIST_HEAD(&gl->gl_waiters1);
	INIT_LIST_HEAD(&gl->gl_waiters2);
	INIT_LIST_HEAD(&gl->gl_waiters3);

	gl->gl_ops = glops;

	gl->gl_bucket = bucket;
	INIT_LIST_HEAD(&gl->gl_reclaim);

	gl->gl_sbd = sdp;

	lops_init_le(&gl->gl_le, &gfs2_glock_lops);
	INIT_LIST_HEAD(&gl->gl_ail_list);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops ||
	    glops == &gfs2_rgrp_glops ||
	    glops == &gfs2_meta_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(&bucket->hb_lock);
	tmp = search_bucket(bucket, &name);
	if (tmp) {
		write_unlock(&bucket->hb_lock);
		glock_free(gl);
		gl = tmp;
	} else {
		list_add_tail(&gl->gl_list, &bucket->hb_list);
		write_unlock(&bucket->hb_lock);
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}
/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner = current;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	init_completion(&gh->gh_wait);

	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gfs2_glock_hold(gl);
}
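/* A typical caller pairs these holder helpers like this (sketch, given
 * some glock gl):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... access the object the glock protects ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */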
/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	if (gh->gh_state == LM_ST_EXCLUSIVE)
		gh->gh_flags |= GL_LOCAL_EXCL;

	gh->gh_iflags &= 1 << HIF_ALLOCED;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}
/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
}
/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: __GFP_NOFAIL
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

static struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl,
					   unsigned int state,
					   int flags, gfp_t gfp_flags)
{
	struct gfs2_holder *gh;

	gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
	if (!gh)
		return NULL;

	gfs2_holder_init(gl, state, flags, gh);
	set_bit(HIF_ALLOCED, &gh->gh_iflags);
	gh->gh_ip = (unsigned long)__builtin_return_address(0);

	return gh;
}
/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 */

static void gfs2_holder_put(struct gfs2_holder *gh)
{
	gfs2_holder_uninit(gh);
	kfree(gh);
}
/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	set_bit(GLF_LOCK, &gl->gl_flags);
	complete(&gh->gh_wait);

	return 1;
}
/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);

			if (atomic_read(&sdp->sd_reclaim_count) >
			    gfs2_tune_get(sdp, gt_reclaim_limit) &&
			    !(gh->gh_flags & LM_FLAG_PRIORITY)) {
				gfs2_reclaim_glock(sdp);
				gfs2_reclaim_glock(sdp);
			}

			glops->go_xmote_th(gl, gh->gh_state,
					   gh->gh_flags);

			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_flags & GL_LOCAL_EXCL)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	complete(&gh->gh_wait);

	return 0;
}
/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
		spin_lock(&gl->gl_spin);
	} else {
		gl->gl_req_gh = gh;
		set_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);

		if (gh->gh_state == LM_ST_UNLOCKED ||
		    gl->gl_state != LM_ST_EXCLUSIVE)
			glops->go_drop_th(gl);
		else
			glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

		spin_lock(&gl->gl_spin);
	}

	return 0;
}
/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/*  gh->gh_error never examined.  */
	clear_bit(GLF_GREEDY, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_holder_uninit(gh);
	kfree(container_of(gh, struct greedy, gr_gh));

	spin_lock(&gl->gl_spin);

	return 0;
}
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 */

static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_MUTEX, &gh->gh_iflags))
				blocked = rq_mutex(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters2) &&
			   !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
			gh = list_entry(gl->gl_waiters2.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
				blocked = rq_demote(gh);
			else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
				blocked = rq_greedy(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);

			if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
				blocked = rq_promote(gh);
			else
				gfs2_assert_warn(gl->gl_sbd, 0);

		} else
			break;

		if (blocked)
			break;
	}
}
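/* Service order implemented above: glmutex requests (gl_waiters1) are
 * always handled first, then demote/greedy requests (gl_waiters2, unless
 * GLF_SKIP_WAITERS2 is set), and only then promotions (gl_waiters3). So
 * a pending inter-node demote is honoured before any new local holder is
 * granted.
 */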
/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	struct gfs2_holder gh;

	gfs2_holder_init(gl, 0, 0, &gh);
	set_bit(HIF_MUTEX, &gh.gh_iflags);

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
	else
		complete(&gh.gh_wait);
	spin_unlock(&gl->gl_spin);

	wait_for_completion(&gh.gh_wait);
	gfs2_holder_uninit(&gh);
}
/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
		acquired = 0;
	spin_unlock(&gl->gl_spin);

	return acquired;
}
/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	BUG_ON(!spin_is_locked(&gl->gl_spin));
	spin_unlock(&gl->gl_spin);
}
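/* Sketch of the usual glmutex pattern (see e.g. gfs2_lvb_hold() below):
 *
 *	gfs2_glmutex_lock(gl);
 *	... manipulate the glock structure ...
 *	gfs2_glmutex_unlock(gl);
 */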
/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
	struct gfs2_holder *gh, *new_gh = NULL;

restart:
	spin_lock(&gl->gl_spin);

	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
		    gl->gl_req_gh != gh) {
			if (gh->gh_state != state)
				gh->gh_state = LM_ST_UNLOCKED;
			goto out;
		}
	}

	if (new_gh) {
		list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
		new_gh = NULL;
	} else {
		spin_unlock(&gl->gl_spin);

		new_gh = gfs2_holder_get(gl, state, LM_FLAG_TRY,
					 GFP_KERNEL | __GFP_NOFAIL);
		set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
		set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

		goto restart;
	}

out:
	spin_unlock(&gl->gl_spin);

	if (new_gh)
		gfs2_holder_put(new_gh);
}
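/* Because the holder above is allocated with __GFP_NOFAIL and the
 * waiters2 list is re-checked under gl_spin after the allocation, a
 * demote request already queued for this glock is coalesced (its state
 * is adjusted) rather than queued a second time.
 */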
/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
}
/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA | DIO_DATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, DIO_DATA);
	}

	/*  Deal with each possible exit condition  */

	if (!gh)
		gl->gl_stamp = jiffies;

	else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		spin_unlock(&gl->gl_spin);

	} else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		if (gl->gl_state == gh->gh_state ||
		    gl->gl_state == LM_ST_UNLOCKED)
			gh->gh_error = 0;
		else {
			if (gfs2_assert_warn(sdp, gh->gh_flags &
					(LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
				fs_warn(sdp, "ret = 0x%.8X\n", ret);
			gh->gh_error = GLR_TRYFAILED;
		}
		spin_unlock(&gl->gl_spin);

		if (ret & LM_OUT_CANCELED)
			handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

	} else if (ret & LM_OUT_CANCELED) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_CANCELED;
		spin_unlock(&gl->gl_spin);

	} else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		spin_lock(&gl->gl_spin);
		list_move_tail(&gh->gh_list, &gl->gl_holders);
		gh->gh_error = 0;
		set_bit(HIF_HOLDER, &gh->gh_iflags);
		spin_unlock(&gl->gl_spin);

		set_bit(HIF_FIRST, &gh->gh_iflags);

		op_done = 0;

	} else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = GLR_TRYFAILED;
		spin_unlock(&gl->gl_spin);

	} else {
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}
/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
			       lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}
/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA | DIO_DATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_drop_bh)
		glops->go_drop_bh(gl);

	spin_lock(&gl->gl_spin);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh) {
		if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
			gfs2_holder_put(gh);
		else
			complete(&gh->gh_wait);
	}
}
/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (glops->go_sync)
			glops->go_sync(gl,
				       DIO_METADATA | DIO_DATA | DIO_RELEASE);
	}

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}
/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh &&
		    !(gl->gl_req_gh &&
		      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}
/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_for_completion(&gh->gh_wait);

	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
						   gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}
static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner == owner)
			return gh;
	}

	return NULL;
}
/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner);

	existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
	if (existing) {
		print_symbol(KERN_WARNING "original: %s\n", existing->gh_ip);
		print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
		BUG();
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}
/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	set_bit(HIF_PROMOTE, &gh->gh_iflags);

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	clear_bit(GLF_PREFETCH, &gl->gl_flags);

	return error;
}
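/* With GL_ASYNC set, the enqueue returns immediately and the caller
 * finishes the acquisition later (illustrative sketch):
 *
 *	gh->gh_flags |= GL_ASYNC;
 *	gfs2_glock_nq(gh);
 *	...
 *	if (gfs2_glock_poll(gh))
 *		error = gfs2_glock_wait(gh);
 */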
/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}
/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}
/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gh->gh_flags & GL_SYNC)
		set_bit(GLF_SYNC, &gl->gl_flags);

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		spin_unlock(&gl->gl_spin);

		if (glops->go_unlock)
			glops->go_unlock(gh);

		if (test_bit(GLF_SYNC, &gl->gl_flags)) {
			if (glops->go_sync)
				glops->go_sync(gl, DIO_METADATA | DIO_DATA);
		}

		gl->gl_stamp = jiffies;

		spin_lock(&gl->gl_spin);
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}
/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 */

static void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state,
				int flags)
{
	struct gfs2_glock_operations *glops = gl->gl_ops;

	spin_lock(&gl->gl_spin);

	if (test_bit(GLF_LOCK, &gl->gl_flags) ||
	    !list_empty(&gl->gl_holders) ||
	    !list_empty(&gl->gl_waiters1) ||
	    !list_empty(&gl->gl_waiters2) ||
	    !list_empty(&gl->gl_waiters3) ||
	    relaxed_state_ok(gl->gl_state, state, flags)) {
		spin_unlock(&gl->gl_spin);
		return;
	}

	set_bit(GLF_PREFETCH, &gl->gl_flags);
	set_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	glops->go_xmote_th(gl, state, flags);
}
static void greedy_work(void *data)
{
	struct greedy *gr = data;
	struct gfs2_holder *gh = &gr->gr_gh;
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_glock_operations *glops = gl->gl_ops;

	clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

	if (glops->go_greedy)
		glops->go_greedy(gl);

	spin_lock(&gl->gl_spin);

	if (list_empty(&gl->gl_waiters2)) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
		gfs2_holder_uninit(gh);
		kfree(gr);
	} else {
		gfs2_glock_hold(gl);
		list_add_tail(&gh->gh_list, &gl->gl_waiters2);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
		gfs2_glock_put(gl);
	}
}
/**
 * gfs2_glock_be_greedy -
 * @gl: the glock
 * @time: the duration of greediness, in jiffies
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
	struct greedy *gr;
	struct gfs2_holder *gh;

	if (!time ||
	    gl->gl_sbd->sd_args.ar_localcaching ||
	    test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
		return 1;

	gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
	if (!gr) {
		clear_bit(GLF_GREEDY, &gl->gl_flags);
		return 1;
	}
	gh = &gr->gr_gh;

	gfs2_holder_init(gl, 0, 0, gh);
	set_bit(HIF_GREEDY, &gh->gh_iflags);
	INIT_WORK(&gr->gr_work, greedy_work, gr);

	set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
	schedule_delayed_work(&gr->gr_work, time);

	return 0;
}
/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}
/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
		      struct gfs2_glock_operations *glops, unsigned int state,
		      int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
	struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
	struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	struct lm_lockname *b = &gh_b->gh_gl->gl_name;
	int ret = 0;

	if (a->ln_number > b->ln_number)
		ret = 1;
	else if (a->ln_number < b->ln_number)
		ret = -1;
	else {
		if (gh_a->gh_state == LM_ST_SHARED &&
		    gh_b->gh_state == LM_ST_EXCLUSIVE)
			ret = 1;
		else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
			 (gh_b->gh_flags & GL_LOCAL_EXCL))
			ret = 1;
	}

	return ret;
}
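/* Example of the resulting order (sketch): holders for lock numbers
 * (5 EX, 3 SH, 5 SH) sort to (3 SH, 5 EX, 5 SH) -- ascending lock
 * number, exclusive before shared on a tie -- giving the global
 * deadlock-free acquisition order used by nq_m_sync() below.
 */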
/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}
/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	int *e;
	unsigned int x;
	int borked = 0, serious = 0;
	int error = 0;

	if (!num_gh)
		return 0;

	if (num_gh == 1) {
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	}

	e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	for (x = 0; x < num_gh; x++) {
		ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
		error = gfs2_glock_nq(&ghs[x]);
		if (error) {
			borked = 1;
			serious = error;
			num_gh = x;
			break;
		}
	}

	for (x = 0; x < num_gh; x++) {
		error = e[x] = glock_wait_internal(&ghs[x]);
		if (error) {
			borked = 1;
			if (error != GLR_TRYFAILED && error != GLR_CANCELED)
				serious = error;
		}
	}

	if (!borked) {
		kfree(e);
		return 0;
	}

	for (x = 0; x < num_gh; x++)
		if (!e[x])
			gfs2_glock_dq(&ghs[x]);

	if (serious)
		error = serious;
	else {
		for (x = 0; x < num_gh; x++)
			gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
					   &ghs[x]);
		error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
	}

	kfree(e);

	return error;
}
/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}
/**
 * gfs2_glock_dq_uninit_m - release multiple glocks and uninitialize the holders
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}
/**
 * gfs2_glock_prefetch_num - prefetch a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 */

void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
			     struct gfs2_glock_operations *glops,
			     unsigned int state, int flags)
{
	struct gfs2_glock *gl;
	int error;

	if (atomic_read(&sdp->sd_reclaim_count) <
	    gfs2_tune_get(sdp, gt_reclaim_limit)) {
		error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
		if (!error) {
			gfs2_glock_prefetch(gl, state, flags);
			gfs2_glock_put(gl);
		}
	}
}
/**
 * gfs2_lvb_hold - attach a LVB from a glock
 * @gl: The glock in question
 *
 * Returns: errno
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}
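/* Sketch of the expected pairing: bracket LVB access with hold/unhold,
 * and push changes with gfs2_lvb_sync() only while the glock is held
 * exclusively (see the assertion in gfs2_lvb_sync() below):
 *
 *	error = gfs2_lvb_hold(gl);
 *	if (!error) {
 *		... read or update gl->gl_lvb ...
 *		gfs2_lvb_unhold(gl);
 *	}
 */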
/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}
void gfs2_lvb_sync(struct gfs2_glock *gl)
{
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
	if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
		gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);

	gfs2_glmutex_unlock(gl);
}
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;

	gl = gfs2_glock_find(sdp, name);
	if (gfs2_assert_warn(sdp, gl))
		return;

	if (gl->gl_ops->go_callback)
		gl->gl_ops->go_callback(gl, state);
	handle_callback(gl, state);

	spin_lock(&gl->gl_spin);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);
}
/**
 * gfs2_glock_cb - Callback used by locking module
 * @fsdata: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		gfs2_glock_put(gl);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}
/**
 * gfs2_try_toss_inode - try to remove a particular inode struct from cache
 * @sdp: the filesystem
 * @inum: the inode number
 */

void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
{
	struct gfs2_glock *gl;
	struct gfs2_inode *ip;
	int error;

	error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
			       NO_CREATE, &gl);
	if (error || !gl)
		return;

	if (!gfs2_glmutex_trylock(gl))
		goto out;

	ip = gl->gl_object;
	if (!ip)
		goto out_unlock;

	if (atomic_read(&ip->i_count))
		goto out_unlock;

	gfs2_inode_destroy(ip, 1);

out_unlock:
	gfs2_glmutex_unlock(gl);
out:
	gfs2_glock_put(gl);
}
/**
 * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
 *                          iopen glock from memory
 * @io_gl: the iopen glock
 * @state: the state into which the glock should be put
 */

void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
{
	struct gfs2_glock *i_gl;

	if (state != LM_ST_UNLOCKED)
		return;

	spin_lock(&io_gl->gl_spin);
	i_gl = io_gl->gl_object;
	if (i_gl) {
		gfs2_glock_hold(i_gl);
		spin_unlock(&io_gl->gl_spin);
	} else {
		spin_unlock(&io_gl->gl_spin);
		return;
	}

	if (gfs2_glmutex_trylock(i_gl)) {
		struct gfs2_inode *ip = i_gl->gl_object;
		if (ip) {
			gfs2_try_toss_vnode(ip);
			gfs2_glmutex_unlock(i_gl);
			gfs2_glock_schedule_for_reclaim(i_gl);
			goto out;
		}
		gfs2_glmutex_unlock(i_gl);
	}

out:
	gfs2_glock_put(i_gl);
}
/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
		demote = time_after_eq(jiffies,
				       gl->gl_stamp +
				       gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}
/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}
/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip, 1);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @bucket: the bucket
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  struct gfs2_gl_hash_bucket *bucket)
{
	struct glock_plug plug;
	struct list_head *tmp;
	struct gfs2_glock *gl;
	int entries;

	/* Add "plug" to end of bucket list, work back up list from there */
	memset(&plug.gl_flags, 0, sizeof(unsigned long));
	set_bit(GLF_PLUG, &plug.gl_flags);

	write_lock(&bucket->hb_lock);
	list_add(&plug.gl_list, &bucket->hb_list);
	write_unlock(&bucket->hb_lock);

	for (;;) {
		write_lock(&bucket->hb_lock);

		for (;;) {
			tmp = plug.gl_list.next;

			if (tmp == &bucket->hb_list) {
				list_del(&plug.gl_list);
				entries = !list_empty(&bucket->hb_list);
				write_unlock(&bucket->hb_lock);
				return entries;
			}
			gl = list_entry(tmp, struct gfs2_glock, gl_list);

			/* Move plug up list */
			list_move(&plug.gl_list, &gl->gl_list);

			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			/* examiner() must glock_put() */
			gfs2_glock_hold(gl);

			break;
		}

		write_unlock(&bucket->hb_lock);

		examiner(gl);
	}
}
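/* The "plug" is a dummy list entry marked GLF_PLUG that is advanced past
 * one real glock at a time, so hb_lock can be dropped while the examiner
 * runs, yet the walk can resume from the plug's position afterwards.
 */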
/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				goto out_schedule;
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED &&
		    demote_ok(gl))
			goto out_schedule;

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);

	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
	gfs2_glock_put(gl);
}
/**
 * gfs2_scand_internal - Look for glocks and inodes to toss from memory
 * @sdp: the filesystem
 */

void gfs2_scand_internal(struct gfs2_sbd *sdp)
{
	unsigned int x;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
		cond_resched();
	}
}
/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else
		spin_unlock(&sdp->sd_reclaim_lock);

	if (gfs2_glmutex_trylock(gl)) {
		if (gl->gl_ops == &gfs2_inode_glops) {
			struct gfs2_inode *ip = gl->gl_object;
			if (ip && !atomic_read(&ip->i_count))
				gfs2_inode_destroy(ip, 1);
		}
		if (queue_empty(gl, &gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED);

		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;

		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			if (examine_bucket(clear_glock, sdp,
					   &sdp->sd_gl_hash[x]))
				cont = 1;

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		/* invalidate_inodes() requires that the sb inodes list
		   not change, but an async completion callback for an
		   unlock can occur which does glock_put() which
		   can call iput() which will change the sb inodes list.
		   invalidate_inodes_mutex prevents glock_put()'s during
		   an invalidate_inodes() */

		mutex_lock(&sdp->sd_invalidate_inodes_mutex);
		invalidate_inodes(sdp->sd_vfs);
		mutex_unlock(&sdp->sd_invalidate_inodes_mutex);

		msleep(10);
	}
}
/*
 *  Diagnostic routines to help debug distributed deadlock
 */

/**
 * dump_holder - print information about a glock holder
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(char *str, struct gfs2_holder *gh)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  %s\n", str);
	printk(KERN_INFO "    owner = %ld\n",
	       (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
	printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
	printk(KERN_INFO "    gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    error = %d\n", gh->gh_error);
	printk(KERN_INFO "    gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			printk(" %u", x);
	printk(" \n");
	print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);

	error = 0;

	return error;
}
/**
 * dump_inode - print information about an inode
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct gfs2_inode *ip)
{
	unsigned int x;
	int error = -ENOBUFS;

	printk(KERN_INFO "  Inode:\n");
	printk(KERN_INFO "    num = %llu %llu\n",
	       ip->i_num.no_formal_ino, ip->i_num.no_addr);
	printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
	printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count));
	printk(KERN_INFO "    i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");

	error = 0;

	return error;
}
/**
 * dump_glock - print information about a glock
 * @gl: the glock
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_glock(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	unsigned int x;
	int error = -ENOBUFS;

	spin_lock(&gl->gl_spin);

	printk(KERN_INFO "Glock (%u, %llu)\n",
	       gl->gl_name.ln_type,
	       gl->gl_name.ln_number);
	printk(KERN_INFO "  gl_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gl->gl_flags))
			printk(" %u", x);
	printk(" \n");
	printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
	printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
	printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
	printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
	printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
	printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
	printk(KERN_INFO "  le = %s\n",
	       (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
	printk(KERN_INFO "  reclaim = %s\n",
	       (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
	if (gl->gl_aspace)
		printk(KERN_INFO "  aspace = %lu\n",
		       gl->gl_aspace->i_mapping->nrpages);
	else
		printk(KERN_INFO "  aspace = no\n");
	printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
	if (gl->gl_req_gh) {
		error = dump_holder("Request", gl->gl_req_gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		error = dump_holder("Holder", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
		error = dump_holder("Waiter1", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
		error = dump_holder("Waiter2", gh);
		if (error)
			goto out;
	}
	list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
		error = dump_holder("Waiter3", gh);
		if (error)
			goto out;
	}
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			printk(KERN_INFO "  Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);

	return error;
}
/**
 * gfs2_dump_lockstate - print out the current lockstate
 * @sdp: the filesystem
 *
 * Dumps the lockstate to the console.
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_gl_hash_bucket *bucket;
	struct gfs2_glock *gl;
	unsigned int x;
	int error = 0;

	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
		bucket = &sdp->sd_gl_hash[x];

		read_lock(&bucket->hb_lock);

		list_for_each_entry(gl, &bucket->hb_list, gl_list) {
			if (test_bit(GLF_PLUG, &gl->gl_flags))
				continue;

			error = dump_glock(gl);
			if (error)
				break;
		}

		read_unlock(&bucket->hb_lock);

		if (error)
			break;
	}

	return error;
}