/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmthread.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"

static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);

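/* a lock counts as remote when its owning node is not this node */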
#define dlm_lock_is_remote(dlm, lock)     ((lock)->ml.node != (dlm)->node_num)

/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
	DECLARE_WAITQUEUE(wait, current);

	assert_spin_locked(&res->spinlock);

	add_wait_queue(&res->wq, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (res->state & flags) {
		spin_unlock(&res->spinlock);
		schedule();
		spin_lock(&res->spinlock);
		goto repeat;
	}
	remove_wait_queue(&res->wq, &wait);
	__set_current_state(TASK_RUNNING);
}

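/* returns 1 if any locks remain on the granted, converting or
 * blocked queues, 0 otherwise */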
int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
	if (list_empty(&res->granted) &&
	    list_empty(&res->converting) &&
	    list_empty(&res->blocked))
		return 0;
	return 1;
}

91 /* "unused": the lockres has no locks, is not on the dirty list,
92 * has no inflight locks (in the gap between mastery and acquiring
93 * the first lock), and has no bits in its refmap.
94 * truly ready to be freed. */
95 int __dlm_lockres_unused(struct dlm_lock_resource
*res
)
97 if (!__dlm_lockres_has_locks(res
) &&
98 (list_empty(&res
->dirty
) && !(res
->state
& DLM_LOCK_RES_DIRTY
))) {
99 /* try not to scan the bitmap unless the first two
100 * conditions are already true */
101 int bit
= find_next_bit(res
->refmap
, O2NM_MAX_NODES
, 0);
102 if (bit
>= O2NM_MAX_NODES
) {
103 /* since the bit for dlm->node_num is not
104 * set, inflight_locks better be zero */
105 BUG_ON(res
->inflight_locks
!= 0);
/* Call whenever you may have added or deleted something from one of
 * the lockres queues. This will figure out whether it belongs on the
 * unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (__dlm_lockres_unused(res)) {
		if (list_empty(&res->purge)) {
			mlog(0, "putting lockres %.*s:%p onto purge list\n",
			     res->lockname.len, res->lockname.name, res);

			res->last_used = jiffies;
			dlm_lockres_get(res);
			list_add_tail(&res->purge, &dlm->purge_list);
			dlm->purge_count++;
		}
	} else if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purge list, owner=%u\n",
		     res->lockname.len, res->lockname.name, res, res->owner);

		list_del_init(&res->purge);
		dlm_lockres_put(res);
		dlm->purge_count--;
	}
}

void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res)
{
	mlog_entry("%.*s\n", res->lockname.len, res->lockname.name);
	spin_lock(&dlm->spinlock);
	spin_lock(&res->spinlock);

	__dlm_lockres_calc_usage(dlm, res);

	spin_unlock(&res->spinlock);
	spin_unlock(&dlm->spinlock);
}

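/* Drop an unused lockres from this node. If we are not the master,
 * our bit must first be cleared from the master's refmap (waiting
 * out any SETREF in progress so the set and clear stay ordered);
 * the lockres is then unhashed so the last ref can free it. */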
static int dlm_purge_lockres(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res)
{
	int master;
	int ret = 0;

	spin_lock(&res->spinlock);
	if (!__dlm_lockres_unused(res)) {
		mlog(0, "%s:%.*s: tried to purge but not unused\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		__dlm_print_one_lock_resource(res);
		spin_unlock(&res->spinlock);
		BUG();
	}

	if (res->state & DLM_LOCK_RES_MIGRATING) {
		mlog(0, "%s:%.*s: Delay dropref as this lockres is "
		     "being remastered\n", dlm->name, res->lockname.len,
		     res->lockname.name);
		/* Re-add the lockres to the end of the purge list */
		if (!list_empty(&res->purge)) {
			list_del_init(&res->purge);
			list_add_tail(&res->purge, &dlm->purge_list);
		}
		spin_unlock(&res->spinlock);
		return 0;
	}

	master = (res->owner == dlm->node_num);

	if (!master)
		res->state |= DLM_LOCK_RES_DROPPING_REF;
	spin_unlock(&res->spinlock);

	mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len,
	     res->lockname.name, master);

	if (!master) {
		/* drop spinlock...  retake below */
		spin_unlock(&dlm->spinlock);

		spin_lock(&res->spinlock);
		/* This ensures that clear refmap is sent after the set */
		__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
		spin_unlock(&res->spinlock);

		/* clear our bit from the master's refmap, ignore errors */
		ret = dlm_drop_lockres_ref(dlm, res);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
		}
		mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n",
		     dlm->name, res->lockname.len, res->lockname.name, ret);
		spin_lock(&dlm->spinlock);
	}

	spin_lock(&res->spinlock);
	if (!list_empty(&res->purge)) {
		mlog(0, "removing lockres %.*s:%p from purgelist, "
		     "master = %d\n", res->lockname.len, res->lockname.name,
		     res, master);
		list_del_init(&res->purge);
		spin_unlock(&res->spinlock);
		dlm_lockres_put(res);
		dlm->purge_count--;
	} else
		spin_unlock(&res->spinlock);

	__dlm_unhash_lockres(res);

	/* lockres is not in the hash now.  drop the flag and wake up
	 * any processes waiting in dlm_get_lock_resource. */
	if (!master) {
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_DROPPING_REF;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}
	return 0;
}

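/* Scan the purge list and drop lockres that have sat unused for at
 * least DLM_PURGE_INTERVAL_MS, or every unused lockres when
 * purge_now is set (as it is during shutdown). Takes and releases
 * dlm->spinlock itself. */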
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
			       int purge_now)
{
	unsigned int run_max, unused;
	unsigned long purge_jiffies;
	struct dlm_lock_resource *lockres;

	spin_lock(&dlm->spinlock);
	run_max = dlm->purge_count;

	while (run_max && !list_empty(&dlm->purge_list)) {
		run_max--;

		lockres = list_entry(dlm->purge_list.next,
				     struct dlm_lock_resource, purge);

		/* Status of the lockres *might* change so double
		 * check. If the lockres is unused, holding the dlm
		 * spinlock will prevent people from getting any more
		 * refs on it -- there's no need to keep the lockres
		 * spinlock. */
		spin_lock(&lockres->spinlock);
		unused = __dlm_lockres_unused(lockres);
		spin_unlock(&lockres->spinlock);

		if (!unused)
			continue;

		purge_jiffies = lockres->last_used +
			msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);

		/* Make sure that we want to be processing this guy at
		 * this time. */
		if (!purge_now && time_after(purge_jiffies, jiffies)) {
			/* Since resources are added to the purge list
			 * in tail order, we can stop at the first
			 * unpurgeable resource -- anyone added after
			 * him will have a greater last_used value */
			break;
		}

		dlm_lockres_get(lockres);

		/* This may drop and reacquire the dlm spinlock if it
		 * has to do migration. */
		if (dlm_purge_lockres(dlm, lockres))
			BUG();

		dlm_lockres_put(lockres);

		/* Avoid adding any scheduling latencies */
		cond_resched_lock(&dlm->spinlock);
	}

	spin_unlock(&dlm->spinlock);
}

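/* Work the convert and blocked queues of a lockres we master: grant
 * the frontmost compatible convert, then the frontmost compatible
 * blocked lock, queueing an AST for each grant and BASTs against the
 * locks found to be in the way. */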
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res)
{
	struct dlm_lock *lock, *target;
	struct list_head *iter;
	struct list_head *head;
	int can_grant = 1;

	//mlog(0, "res->lockname.len=%d\n", res->lockname.len);
	//mlog(0, "res->lockname.name=%p\n", res->lockname.name);
	//mlog(0, "shuffle res %.*s\n", res->lockname.len,
	//	  res->lockname.name);

	/* because this function is called with the lockres
	 * spinlock, and because we know that it is not migrating/
	 * recovering/in-progress, it is fine to reserve asts and
	 * basts right before queueing them all throughout */
	assert_spin_locked(&res->spinlock);
	BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
			      DLM_LOCK_RES_RECOVERING|
			      DLM_LOCK_RES_IN_PROGRESS)));

converting:
	if (list_empty(&res->converting))
		goto blocked;
	mlog(0, "res %.*s has locks on a convert queue\n", res->lockname.len,
	     res->lockname.name);

	target = list_entry(res->converting.next, struct dlm_lock, list);
	if (target->ml.convert_type == LKM_IVMODE) {
		mlog(ML_ERROR, "%.*s: converting a lock with no "
		     "convert_type!\n", res->lockname.len, res->lockname.name);
		BUG();
	}
	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			/* queue the BAST if not already */
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			/* update the highest_blocked if needed */
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}
	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type,
					 target->ml.convert_type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.convert_type)
				lock->ml.highest_blocked =
					target->ml.convert_type;
		}
	}

	/* we can convert the lock */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "calling ast for converting lock: %.*s, have: %d, "
		     "granting: %d, node: %u\n", res->lockname.len,
		     res->lockname.name, target->ml.type,
		     target->ml.convert_type, target->ml.node);

		target->ml.type = target->ml.convert_type;
		target->ml.convert_type = LKM_IVMODE;
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

blocked:
	if (list_empty(&res->blocked))
		goto leave;
	target = list_entry(res->blocked.next, struct dlm_lock, list);

	head = &res->granted;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	head = &res->converting;
	list_for_each(iter, head) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock == target)
			continue;
		if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
			can_grant = 0;
			if (lock->ml.highest_blocked == LKM_IVMODE) {
				__dlm_lockres_reserve_ast(res);
				dlm_queue_bast(dlm, lock);
			}
			if (lock->ml.highest_blocked < target->ml.type)
				lock->ml.highest_blocked = target->ml.type;
		}
	}

	/* we can grant the blocked lock (only
	 * possible if converting list empty) */
	if (can_grant) {
		spin_lock(&target->spinlock);
		BUG_ON(target->ml.highest_blocked != LKM_IVMODE);

		mlog(0, "calling ast for blocked lock: %.*s, granting: %d, "
		     "node: %u\n", res->lockname.len, res->lockname.name,
		     target->ml.type, target->ml.node);

		/* target->ml.type is already correct */
		list_move_tail(&target->list, &res->granted);

		BUG_ON(!target->lksb);
		target->lksb->status = DLM_NORMAL;

		spin_unlock(&target->spinlock);

		__dlm_lockres_reserve_ast(res);
		dlm_queue_ast(dlm, target);
		/* go back and check for more */
		goto converting;
	}

leave:
	return;
}

/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	mlog_entry("dlm=%p, res=%p\n", dlm, res);
	if (res) {
		spin_lock(&dlm->spinlock);
		spin_lock(&res->spinlock);
		__dlm_dirty_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	}
	wake_up(&dlm->dlm_thread_wq);
}

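/* mark a lockres dirty and put it on the dirty list if we master it;
 * caller must hold both dlm->spinlock and res->spinlock */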
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
	mlog_entry("dlm=%p, res=%p\n", dlm, res);

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* don't shuffle secondary queues */
	if (res->owner == dlm->node_num) {
		if (res->state & (DLM_LOCK_RES_MIGRATING |
				  DLM_LOCK_RES_BLOCK_DIRTY))
			return;

		if (list_empty(&res->dirty)) {
			/* ref for dirty_list */
			dlm_lockres_get(res);
			list_add_tail(&res->dirty, &dlm->dirty_list);
			res->state |= DLM_LOCK_RES_DIRTY;
		}
	}
}

/* Launch the dlm thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm thread...\n");

	dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
	if (IS_ERR(dlm->dlm_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_thread_task));
		dlm->dlm_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_thread_task) {
		mlog(ML_KTHREAD, "waiting for dlm thread to exit\n");
		kthread_stop(dlm->dlm_thread_task);
		dlm->dlm_thread_task = NULL;
	}
}

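/* check under dlm->spinlock whether any lockres is waiting to be
 * shuffled */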
static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
	int empty;

	spin_lock(&dlm->spinlock);
	empty = list_empty(&dlm->dirty_list);
	spin_unlock(&dlm->spinlock);

	return empty;
}

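/* Deliver every pending AST and BAST, either by running the local
 * handler or by sending it over the wire to the lock's node.
 * dlm->ast_lock is dropped around each delivery and retaken. */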
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
	int ret;
	struct dlm_lock *lock;
	struct dlm_lock_resource *res;
	u8 hi;

	spin_lock(&dlm->ast_lock);
	while (!list_empty(&dlm->pending_asts)) {
		lock = list_entry(dlm->pending_asts.next,
				  struct dlm_lock, ast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;
		mlog(0, "delivering an ast for this lockres\n");

		BUG_ON(!lock->ast_pending);

		/* remove from list (including ref) */
		list_del_init(&lock->ast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_do_remote_ast(dlm, res, lock);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_ast(dlm, res, lock);

		spin_lock(&dlm->ast_lock);

		/* possible that another ast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->ast_list)) {
			mlog(0, "aha another ast got queued while "
			     "we were finishing the last one.  will "
			     "keep the ast_pending flag set.\n");
		} else
			lock->ast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}

	while (!list_empty(&dlm->pending_basts)) {
		lock = list_entry(dlm->pending_basts.next,
				  struct dlm_lock, bast_list);
		/* get an extra ref on lock */
		dlm_lock_get(lock);
		res = lock->lockres;

		BUG_ON(!lock->bast_pending);

		/* get the highest blocked lock, and reset */
		spin_lock(&lock->spinlock);
		BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
		hi = lock->ml.highest_blocked;
		lock->ml.highest_blocked = LKM_IVMODE;
		spin_unlock(&lock->spinlock);

		/* remove from list (including ref) */
		list_del_init(&lock->bast_list);
		dlm_lock_put(lock);
		spin_unlock(&dlm->ast_lock);

		mlog(0, "delivering a bast for this lockres "
		     "(blocked = %d)\n", hi);

		if (lock->ml.node != dlm->node_num) {
			ret = dlm_send_proxy_bast(dlm, res, lock, hi);
			if (ret < 0)
				mlog_errno(ret);
		} else
			dlm_do_local_bast(dlm, res, lock, hi);

		spin_lock(&dlm->ast_lock);

		/* possible that another bast was queued while
		 * we were delivering the last one */
		if (!list_empty(&lock->bast_list)) {
			mlog(0, "aha another bast got queued while "
			     "we were finishing the last one.  will "
			     "keep the bast_pending flag set.\n");
		} else
			lock->bast_pending = 0;

		/* drop the extra ref.
		 * this may drop it completely. */
		dlm_lock_put(lock);
		dlm_lockres_release_ast(dlm, res);
	}
	wake_up(&dlm->ast_wq);
	spin_unlock(&dlm->ast_lock);
}

#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY  100
#define DLM_THREAD_MAX_ASTS   10

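/* The main dlm thread: purges unused lockres, shuffles the queues of
 * each dirty lockres this node masters (throttled to
 * DLM_THREAD_MAX_DIRTY per pass), flushes pending ASTs/BASTs, then
 * sleeps until kicked or DLM_THREAD_TIMEOUT_MS elapses. */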
static int dlm_thread(void *data)
{
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		int n = DLM_THREAD_MAX_DIRTY;

		/* dlm_shutting_down is very point-in-time, but that
		 * doesn't matter as we'll just loop back around if we
		 * get false on the leading edge of a state
		 * transition. */
		dlm_run_purge_list(dlm, dlm_shutting_down(dlm));

		/* We really don't want to hold dlm->spinlock while
		 * calling dlm_shuffle_lists on each lockres that
		 * needs to have its queues adjusted and AST/BASTs
		 * run.  So let's pull each entry off the dirty_list
		 * and drop dlm->spinlock ASAP.  Once off the list,
		 * res->spinlock needs to be taken again to protect
		 * the queues while calling dlm_shuffle_lists.  */
		spin_lock(&dlm->spinlock);
		while (!list_empty(&dlm->dirty_list)) {
			int delay = 0;
			res = list_entry(dlm->dirty_list.next,
					 struct dlm_lock_resource, dirty);

			/* peel a lockres off, remove it from the list,
			 * unset the dirty flag and drop the dlm lock */
			BUG_ON(!res);
			dlm_lockres_get(res);

			spin_lock(&res->spinlock);
			/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
			list_del_init(&res->dirty);
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			/* Drop dirty_list ref */
			dlm_lockres_put(res);

			/* lockres can be re-dirtied/re-added to the
			 * dirty_list in this gap, but that is ok */

			spin_lock(&res->spinlock);
			if (res->owner != dlm->node_num) {
				__dlm_print_one_lock_resource(res);
				mlog(ML_ERROR, "inprog:%s, mig:%s, reco:%s, dirty:%s\n",
				     res->state & DLM_LOCK_RES_IN_PROGRESS ? "yes" : "no",
				     res->state & DLM_LOCK_RES_MIGRATING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_RECOVERING ? "yes" : "no",
				     res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
			}
			BUG_ON(res->owner != dlm->node_num);

			/* it is now ok to move lockreses in these states
			 * to the dirty list, assuming that they will only be
			 * dirty for a short while. */
			BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
			if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
					  DLM_LOCK_RES_RECOVERING)) {
				/* move it to the tail and keep going */
				res->state &= ~DLM_LOCK_RES_DIRTY;
				spin_unlock(&res->spinlock);
				mlog(0, "delaying list shuffling for in-"
				     "progress lockres %.*s, state=%d\n",
				     res->lockname.len, res->lockname.name,
				     res->state);
				delay = 1;
				goto in_progress;
			}

			/* at this point the lockres is not migrating/
			 * recovering/in-progress.  we have the lockres
			 * spinlock and do NOT have the dlm lock.
			 * safe to reserve/queue asts and run the lists. */

			mlog(0, "calling dlm_shuffle_lists with dlm=%s, "
			     "res=%.*s\n", dlm->name,
			     res->lockname.len, res->lockname.name);

			/* called while holding lockres lock */
			dlm_shuffle_lists(dlm, res);
			res->state &= ~DLM_LOCK_RES_DIRTY;
			spin_unlock(&res->spinlock);

			dlm_lockres_calc_usage(dlm, res);

in_progress:

			spin_lock(&dlm->spinlock);
			/* if the lock was in-progress, stick
			 * it on the back of the list */
			if (delay) {
				spin_lock(&res->spinlock);
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
			}
			dlm_lockres_put(res);

			/* unlikely, but we may need to give time to
			 * other tasks */
			if (!--n) {
				mlog(0, "throttling dlm_thread\n");
				break;
			}
		}

		spin_unlock(&dlm->spinlock);
		dlm_flush_asts(dlm);

		/* yield and continue right away if there is more work to do */
		if (!n) {
			cond_resched();
			continue;
		}

		wait_event_interruptible_timeout(dlm->dlm_thread_wq,
						 !dlm_dirty_list_empty(dlm) ||
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM thread\n");
	return 0;
}