/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
                                 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
                                 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
                                              u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
                                         void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);

static u64 dlm_get_next_mig_cookie(void);
static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
static u64 dlm_mig_cookie = 1;
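
/* Cookie handed out for multi-message lockres migrations.  When a
 * lockres holds more locks than fit in one page-sized message, every
 * message in the series carries the same cookie so the receiver can
 * tie them together (see dlm_send_one_lockres below). */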
static u64 dlm_get_next_mig_cookie(void)
{
        u64 c;

        spin_lock(&dlm_mig_cookie_lock);
        c = dlm_mig_cookie;
        if (dlm_mig_cookie == (~0ULL))
                dlm_mig_cookie = 1;
        else
                dlm_mig_cookie++;
        spin_unlock(&dlm_mig_cookie_lock);
        return c;
}
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
                                          u8 dead_node)
{
        assert_spin_locked(&dlm->spinlock);
        if (dlm->reco.dead_node != dead_node)
                mlog(0, "%s: changing dead_node from %u to %u\n",
                     dlm->name, dlm->reco.dead_node, dead_node);
        dlm->reco.dead_node = dead_node;
}
static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
                                       u8 master)
{
        assert_spin_locked(&dlm->spinlock);
        mlog(0, "%s: changing new_master from %u to %u\n",
             dlm->name, dlm->reco.new_master, master);
        dlm->reco.new_master = master;
}
static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        assert_spin_locked(&dlm->spinlock);
        clear_bit(dlm->reco.dead_node, dlm->recovery_map);
        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}
static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        __dlm_reset_recovery(dlm);
        spin_unlock(&dlm->spinlock);
}
/* Worker function used during recovery. */
void dlm_dispatch_work(void *data)
{
        struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
        LIST_HEAD(tmp_list);
        struct list_head *iter, *iter2;
        struct dlm_work_item *item;
        dlm_workfunc_t *workfunc;

        spin_lock(&dlm->work_lock);
        list_splice_init(&dlm->work_list, &tmp_list);
        spin_unlock(&dlm->work_lock);

        list_for_each_safe(iter, iter2, &tmp_list) {
                item = list_entry(iter, struct dlm_work_item, list);
                workfunc = item->func;
                list_del_init(&item->list);

                /* already have ref on dlm to avoid having
                 * it disappear.  just double-check. */
                BUG_ON(item->dlm != dlm);

                /* this is allowed to sleep and
                 * call network stuff */
                workfunc(item, item->data);

                /* drop the extra ref taken for the work item
                 * and free the item itself */
                dlm_put(dlm);
                kfree(item);
        }
}
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
        /* wake the recovery thread
         * this will wake the reco thread in one of three places
         * 1) sleeping with no recovery happening
         * 2) sleeping with recovery mastered elsewhere
         * 3) recovery mastered here, waiting on reco data */

        wake_up(&dlm->dlm_reco_thread_wq);
}
/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
        mlog(0, "starting dlm recovery thread...\n");

        dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
                                                "dlm_reco_thread");
        if (IS_ERR(dlm->dlm_reco_thread_task)) {
                mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
                dlm->dlm_reco_thread_task = NULL;
                return -EINVAL;
        }

        return 0;
}
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_reco_thread_task) {
                mlog(0, "waiting for dlm recovery thread to exit\n");
                kthread_stop(dlm->dlm_reco_thread_task);
                dlm->dlm_reco_thread_task = NULL;
        }
}
/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    one node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 */
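
/* On the recovery master, each live node's progress through steps 5-8
 * above is tracked by a struct dlm_reco_node_data that moves through
 * the DLM_RECO_NODE_DATA_* states: INIT -> REQUESTING -> REQUESTED ->
 * RECEIVING -> DONE -> FINALIZE_SENT, or DEAD if that node dies along
 * the way (see dlm_remaster_locks below). */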
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
        struct dlm_reco_node_data *ndata;
        struct dlm_lock_resource *res;

        mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
             dlm->name, dlm->dlm_reco_thread_task->pid,
             dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
             dlm->reco.dead_node, dlm->reco.new_master);

        list_for_each_entry(ndata, &dlm->reco.node_data, list) {
                char *st = "unknown";
                switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                                st = "init";
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                st = "requesting";
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                st = "dead";
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                                st = "receiving";
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                st = "requested";
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                st = "done";
                                break;
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                st = "finalize-sent";
                                break;
                }
                mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
                     dlm->name, ndata->node_num, st);
        }

        list_for_each_entry(res, &dlm->reco.resources, recovering) {
                mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
                     dlm->name, res->lockname.len, res->lockname.name);
        }
}
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)
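
/* even with no explicit kicks, the recovery thread wakes up this often
 * to rescan the recovery map for newly dead nodes */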
static int dlm_recovery_thread(void *data)
{
        int status;
        struct dlm_ctxt *dlm = data;
        unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

        mlog(0, "dlm thread running for %s...\n", dlm->name);

        while (!kthread_should_stop()) {
                if (dlm_joined(dlm)) {
                        status = dlm_do_recovery(dlm);
                        if (status == -EAGAIN) {
                                /* do not sleep, recheck immediately. */
                                continue;
                        }
                        if (status < 0)
                                mlog_errno(status);
                }

                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                                 kthread_should_stop(),
                                                 timeout);
        }

        mlog(0, "quitting DLM recovery thread\n");
        return 0;
}
/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
        int ready;

        spin_lock(&dlm->spinlock);
        ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
        spin_unlock(&dlm->spinlock);
        return ready;
}
/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
        int dead;

        spin_lock(&dlm->spinlock);
        dead = !test_bit(node, dlm->domain_map);
        spin_unlock(&dlm->spinlock);
        return dead;
}
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
        if (timeout) {
                mlog(ML_NOTICE, "%s: waiting %dms for notification of "
                     "death of node %u\n", dlm->name, timeout, node);
                wait_event_timeout(dlm->dlm_reco_thread_wq,
                                   dlm_is_node_dead(dlm, node),
                                   msecs_to_jiffies(timeout));
        } else {
                mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
                     "of death of node %u\n", dlm->name, node);
                wait_event(dlm->dlm_reco_thread_wq,
                           dlm_is_node_dead(dlm, node));
        }
        /* for now, return 0 */
        return 0;
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
        int in_recovery;

        spin_lock(&dlm->spinlock);
        in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        spin_unlock(&dlm->spinlock);
        return in_recovery;
}
void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
        wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
        dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
}
static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm->spinlock);
        BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
        dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
        spin_unlock(&dlm->spinlock);
        wake_up(&dlm->reco.event);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
        int status = 0;
        int ret;

        spin_lock(&dlm->spinlock);

        /* check to see if the new master has died */
        if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
            test_bit(dlm->reco.new_master, dlm->recovery_map)) {
                mlog(0, "new master %u died while recovering %u!\n",
                     dlm->reco.new_master, dlm->reco.dead_node);
                /* unset the new_master, leave dead_node */
                dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
        }

        /* select a target to recover */
        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                int bit;

                bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
                if (bit >= O2NM_MAX_NODES || bit < 0)
                        dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
                else
                        dlm_set_reco_dead_node(dlm, bit);
        } else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
                mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
                     dlm->reco.dead_node);
                dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
        }

        if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
                // mlog(0, "nothing to recover!  sleeping now!\n");
                spin_unlock(&dlm->spinlock);
                /* return to main thread loop and sleep. */
                return 0;
        }
        mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
             dlm->name, dlm->dlm_reco_thread_task->pid,
             dlm->reco.dead_node);
        spin_unlock(&dlm->spinlock);

        /* take write barrier */
        /* (stops the list reshuffling thread, proxy ast handling) */
        dlm_begin_recovery(dlm);

        if (dlm->reco.new_master == dlm->node_num)
                goto master_here;

        if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                /* choose a new master, returns 0 if this node
                 * is the master, -EEXIST if it's another node.
                 * this does not return until a new master is chosen
                 * or recovery completes entirely. */
                ret = dlm_pick_recovery_master(dlm);
                if (!ret) {
                        /* already notified everyone.  go. */
                        goto master_here;
                }
                mlog(0, "another node will master this recovery session.\n");
        }
        mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
             dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
             dlm->node_num, dlm->reco.dead_node);

        /* it is safe to start everything back up here
         * because all of the dead node's lock resources
         * have been marked as in-recovery */
        dlm_end_recovery(dlm);

        /* sleep out in main dlm_recovery_thread loop. */
        return 0;

master_here:
        mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
             dlm->dlm_reco_thread_task->pid,
             dlm->name, dlm->reco.dead_node, dlm->node_num);

        status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
        if (status < 0) {
                /* we should never hit this anymore */
                mlog(ML_ERROR, "error %d remastering locks for node %u, "
                     "retrying.\n", status, dlm->reco.dead_node);
                /* yield a bit to allow any final network messages
                 * to get handled on remaining nodes */
                msleep(100);
        } else {
                /* success!  see if any other nodes need recovery */
                mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
                     dlm->name, dlm->reco.dead_node, dlm->node_num);
                dlm_reset_recovery(dlm);
        }
        dlm_end_recovery(dlm);

        /* continue and look for another dead node */
        return -EAGAIN;
}
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
        int status = 0;
        struct dlm_reco_node_data *ndata;
        struct list_head *iter;
        int all_nodes_done;
        int destroy = 0;
        int pass = 0;

        do {
                /* we have become recovery master.  there is no escaping
                 * this, so just keep trying until we get it. */
                status = dlm_init_recovery_area(dlm, dead_node);
                if (status < 0) {
                        mlog(ML_ERROR, "%s: failed to alloc recovery area, "
                             "retrying\n", dlm->name);
                        msleep(1000);
                }
        } while (status != 0);

        /* safe to access the node data list without a lock, since this
         * process is the only one to change the list */
        list_for_each(iter, &dlm->reco.node_data) {
                ndata = list_entry (iter, struct dlm_reco_node_data, list);
                BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
                ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

                mlog(0, "requesting lock info from node %u\n",
                     ndata->node_num);

                if (ndata->node_num == dlm->node_num) {
                        ndata->state = DLM_RECO_NODE_DATA_DONE;
                        continue;
                }

                do {
                        status = dlm_request_all_locks(dlm, ndata->node_num,
                                                       dead_node);
                        if (status < 0) {
                                mlog_errno(status);
                                if (dlm_is_host_down(status)) {
                                        /* node died, ignore it for recovery */
                                        status = 0;
                                        ndata->state = DLM_RECO_NODE_DATA_DEAD;
                                        /* wait for the domain map to catch up
                                         * with the network state. */
                                        wait_event_timeout(dlm->dlm_reco_thread_wq,
                                                           dlm_is_node_dead(dlm,
                                                                ndata->node_num),
                                                           msecs_to_jiffies(1000));
                                        mlog(0, "waited 1 sec for %u, "
                                             "dead? %s\n", ndata->node_num,
                                             dlm_is_node_dead(dlm, ndata->node_num) ?
                                             "yes" : "no");
                                } else {
                                        /* -ENOMEM on the other node */
                                        mlog(0, "%s: node %u returned "
                                             "%d during recovery, retrying "
                                             "after a short wait\n",
                                             dlm->name, ndata->node_num,
                                             status);
                                        msleep(100);
                                }
                        }
                } while (status != 0);

                switch (ndata->state) {
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                                BUG();
                                break;
                        case DLM_RECO_NODE_DATA_DEAD:
                                mlog(0, "node %u died after requesting "
                                     "recovery info for node %u\n",
                                     ndata->node_num, dead_node);
                                /* fine.  don't need this node's info.
                                 * continue without it. */
                                break;
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
                                mlog(0, "now receiving recovery data from "
                                     "node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_RECEIVING:
                                mlog(0, "already receiving recovery data from "
                                     "node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                        case DLM_RECO_NODE_DATA_DONE:
                                mlog(0, "already DONE receiving recovery data "
                                     "from node %u for dead node %u\n",
                                     ndata->node_num, dead_node);
                                break;
                }
        }

        mlog(0, "done requesting all lock info\n");

        /* nodes should be sending reco data now
         * just need to wait */

        while (1) {
                /* check all the nodes now to see if we are
                 * done, or if anyone died */
                all_nodes_done = 1;
                spin_lock(&dlm_reco_state_lock);
                list_for_each(iter, &dlm->reco.node_data) {
                        ndata = list_entry (iter, struct dlm_reco_node_data, list);

                        mlog(0, "checking recovery state of node %u\n",
                             ndata->node_num);
                        switch (ndata->state) {
                                case DLM_RECO_NODE_DATA_INIT:
                                case DLM_RECO_NODE_DATA_REQUESTING:
                                        mlog(ML_ERROR, "bad ndata state for "
                                             "node %u: state=%d\n",
                                             ndata->node_num, ndata->state);
                                        BUG();
                                        break;
                                case DLM_RECO_NODE_DATA_DEAD:
                                        mlog(0, "node %u died after "
                                             "requesting recovery info for "
                                             "node %u\n", ndata->node_num,
                                             dead_node);
                                        spin_unlock(&dlm_reco_state_lock);
                                        /* start all over */
                                        destroy = 1;
                                        status = -EAGAIN;
                                        goto leave;
                                case DLM_RECO_NODE_DATA_RECEIVING:
                                case DLM_RECO_NODE_DATA_REQUESTED:
                                        mlog(0, "%s: node %u still in state %s\n",
                                             dlm->name, ndata->node_num,
                                             ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
                                             "receiving" : "requested");
                                        all_nodes_done = 0;
                                        break;
                                case DLM_RECO_NODE_DATA_DONE:
                                        mlog(0, "%s: node %u state is done\n",
                                             dlm->name, ndata->node_num);
                                        break;
                                case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                        mlog(0, "%s: node %u state is finalize\n",
                                             dlm->name, ndata->node_num);
                                        break;
                        }
                }
                spin_unlock(&dlm_reco_state_lock);

                mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
                     all_nodes_done?"yes":"no");
                if (all_nodes_done) {
                        int ret;

                        /* all nodes are now in DLM_RECO_NODE_DATA_DONE state
                         * just send a finalize message to everyone and
                         * clean up */
                        mlog(0, "all nodes are done! send finalize\n");
                        ret = dlm_send_finalize_reco_message(dlm);
                        if (ret < 0)
                                mlog_errno(ret);

                        spin_lock(&dlm->spinlock);
                        dlm_finish_local_lockres_recovery(dlm, dead_node,
                                                          dlm->node_num);
                        spin_unlock(&dlm->spinlock);
                        mlog(0, "should be done with recovery!\n");

                        mlog(0, "finishing recovery of %s at %lu, "
                             "dead=%u, this=%u, new=%u\n", dlm->name,
                             jiffies, dlm->reco.dead_node,
                             dlm->node_num, dlm->reco.new_master);
                        destroy = 1;
                        status = ret;
                        /* rescan everything marked dirty along the way */
                        dlm_kick_thread(dlm, NULL);
                        break;
                }
                /* wait to be signalled, with periodic timeout
                 * to check for node death */
                wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
                                        kthread_should_stop(),
                                        msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
        }

leave:
        if (destroy)
                dlm_destroy_recovery_area(dlm, dead_node);

        return status;
}
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        int num = 0;
        struct dlm_reco_node_data *ndata;

        spin_lock(&dlm->spinlock);
        memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
        /* nodes can only be removed (by dying) after dropping
         * this lock, and death will be trapped later, so this should do */
        spin_unlock(&dlm->spinlock);

        while (1) {
                num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
                if (num >= O2NM_MAX_NODES) {
                        break;
                }
                BUG_ON(num == dead_node);

                ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
                if (!ndata) {
                        dlm_destroy_recovery_area(dlm, dead_node);
                        return -ENOMEM;
                }
                ndata->node_num = num;
                ndata->state = DLM_RECO_NODE_DATA_INIT;
                spin_lock(&dlm_reco_state_lock);
                list_add_tail(&ndata->list, &dlm->reco.node_data);
                spin_unlock(&dlm_reco_state_lock);
                num++;
        }

        return 0;
}
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
        struct list_head *iter, *iter2;
        struct dlm_reco_node_data *ndata;
        LIST_HEAD(tmplist);

        spin_lock(&dlm_reco_state_lock);
        list_splice_init(&dlm->reco.node_data, &tmplist);
        spin_unlock(&dlm_reco_state_lock);

        list_for_each_safe(iter, iter2, &tmplist) {
                ndata = list_entry (iter, struct dlm_reco_node_data, list);
                list_del_init(&ndata->list);
                kfree(ndata);
        }
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
                                 u8 dead_node)
{
        struct dlm_lock_request lr;
        int ret;

        mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
             "to %u\n", dead_node, request_from);

        memset(&lr, 0, sizeof(lr));
        lr.node_idx = dlm->node_num;
        lr.dead_node = dead_node;

        /* send message */
        ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
                                 &lr, sizeof(lr), request_from, NULL);

        /* negative status is handled by caller */
        if (ret < 0)
                mlog_errno(ret);

        // return from here, then
        // sleep until all received or error
        return ret;
}
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        if (lr->dead_node != dlm->reco.dead_node) {
                mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
                     "dead_node is %u\n", dlm->name, lr->node_idx,
                     lr->dead_node, dlm->reco.dead_node);
                dlm_print_reco_node_status(dlm);
                dlm_put(dlm);
                return -ENOMEM;
        }
        BUG_ON(lr->dead_node != dlm->reco.dead_node);

        item = kcalloc(1, sizeof(*item), GFP_KERNEL);
        if (!item) {
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* this will get freed by dlm_request_all_locks_worker */
        buf = (char *) __get_free_page(GFP_KERNEL);
        if (!buf) {
                kfree(item);
                dlm_put(dlm);
                return -ENOMEM;
        }

        /* queue up work for dlm_request_all_locks_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
        item->u.ral.reco_master = lr->node_idx;
        item->u.ral.dead_node = lr->dead_node;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        schedule_work(&dlm->dispatched_work);

        dlm_put(dlm);
        return 0;
}
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_migratable_lockres *mres;
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;
        LIST_HEAD(resources);
        struct list_head *iter;
        int ret;
        u8 dead_node, reco_master;
        int skip_all_done = 0;

        dlm = item->dlm;
        dead_node = item->u.ral.dead_node;
        reco_master = item->u.ral.reco_master;
        mres = (struct dlm_migratable_lockres *)data;

        mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
             dlm->name, dead_node, reco_master);

        if (dead_node != dlm->reco.dead_node ||
            reco_master != dlm->reco.new_master) {
                /* worker could have been created before the recovery master
                 * died.  if so, do not continue, but do not error. */
                if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
                        mlog(ML_NOTICE, "%s: will not send recovery state, "
                             "recovery master %u died, thread=(dead=%u,mas=%u)"
                             " current=(dead=%u,mas=%u)\n", dlm->name,
                             reco_master, dead_node, reco_master,
                             dlm->reco.dead_node, dlm->reco.new_master);
                } else {
                        mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
                             "master=%u), request(dead=%u, master=%u)\n",
                             dlm->name, dlm->reco.dead_node,
                             dlm->reco.new_master, dead_node, reco_master);
                }
                goto leave;
        }

        /* lock resources should have already been moved to the
         * dlm->reco.resources list.  now move items from that list
         * to a temp list if the dead owner matches.  note that the
         * whole cluster recovers only one node at a time, so we
         * can safely move UNKNOWN lock resources for each recovery
         * session. */
        dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

        /* now we can begin blasting lockreses without the dlm lock */

        /* any errors returned will be due to the new_master dying,
         * the dlm_reco_thread should detect this */
        list_for_each(iter, &resources) {
                res = list_entry (iter, struct dlm_lock_resource, recovering);
                ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
                                           DLM_MRES_RECOVERY);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery state for dead node %u, ret=%d\n", dlm->name,
                             reco_master, dead_node, ret);
                        skip_all_done = 1;
                        break;
                }
        }

        /* move the resources back to the list */
        spin_lock(&dlm->spinlock);
        list_splice_init(&resources, &dlm->reco.resources);
        spin_unlock(&dlm->spinlock);

        if (!skip_all_done) {
                ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
                if (ret < 0) {
                        mlog(ML_ERROR, "%s: node %u went down while sending "
                             "recovery all-done for dead node %u, ret=%d\n",
                             dlm->name, reco_master, dead_node, ret);
                }
        }
leave:
        free_page((unsigned long)data);
}
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
        int ret, tmpret;
        struct dlm_reco_data_done done_msg;

        memset(&done_msg, 0, sizeof(done_msg));
        done_msg.node_idx = dlm->node_num;
        done_msg.dead_node = dead_node;
        mlog(0, "sending DATA DONE message to %u, "
             "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
             done_msg.dead_node);

        ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
                                 sizeof(done_msg), send_to, &tmpret);
        if (ret < 0) {
                if (!dlm_is_host_down(ret)) {
                        mlog_errno(ret);
                        mlog(ML_ERROR, "%s: unknown error sending data-done "
                             "to %u\n", dlm->name, send_to);
                        BUG();
                }
        } else
                ret = tmpret;
        return ret;
}
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
        struct list_head *iter;
        struct dlm_reco_node_data *ndata = NULL;
        int ret = -EINVAL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
             "node_idx=%u, this node=%u\n", done->dead_node,
             dlm->reco.dead_node, done->node_idx, dlm->node_num);

        mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
                        "Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
                        "node_idx=%u, this node=%u\n", done->dead_node,
                        dlm->reco.dead_node, done->node_idx, dlm->node_num);

        spin_lock(&dlm_reco_state_lock);
        list_for_each(iter, &dlm->reco.node_data) {
                ndata = list_entry (iter, struct dlm_reco_node_data, list);
                if (ndata->node_num != done->node_idx)
                        continue;

                switch (ndata->state) {
                        /* should have moved beyond INIT but not to FINALIZE yet */
                        case DLM_RECO_NODE_DATA_INIT:
                        case DLM_RECO_NODE_DATA_DEAD:
                        case DLM_RECO_NODE_DATA_FINALIZE_SENT:
                                mlog(ML_ERROR, "bad ndata state for node %u:"
                                     " state=%d\n", ndata->node_num,
                                     ndata->state);
                                BUG();
                                break;
                        /* these states are possible at this point, anywhere along
                         * the line of recovery */
                        case DLM_RECO_NODE_DATA_DONE:
                        case DLM_RECO_NODE_DATA_RECEIVING:
                        case DLM_RECO_NODE_DATA_REQUESTED:
                        case DLM_RECO_NODE_DATA_REQUESTING:
                                mlog(0, "node %u is DONE sending "
                                     "recovery data!\n",
                                     ndata->node_num);

                                ndata->state = DLM_RECO_NODE_DATA_DONE;
                                ret = 0;
                                break;
                }
        }
        spin_unlock(&dlm_reco_state_lock);

        /* wake the recovery thread, some node is done */
        if (!ret)
                dlm_kick_recovery_thread(dlm);

        if (ret < 0)
                mlog(ML_ERROR, "failed to find recovery node data for node "
                     "%u\n", done->node_idx);
        dlm_put(dlm);

        mlog(0, "leaving reco data done handler, ret=%d\n", ret);
        return ret;
}
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
                                        struct list_head *list,
                                        u8 dead_node)
{
        struct dlm_lock_resource *res;
        struct list_head *iter, *iter2;
        struct dlm_lock *lock;

        spin_lock(&dlm->spinlock);
        list_for_each_safe(iter, iter2, &dlm->reco.resources) {
                res = list_entry (iter, struct dlm_lock_resource, recovering);
                /* always prune any $RECOVERY entries for dead nodes,
                 * otherwise hangs can occur during later recovery */
                if (dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        spin_lock(&res->spinlock);
                        list_for_each_entry(lock, &res->granted, list) {
                                if (lock->ml.node == dead_node) {
                                        mlog(0, "AHA! there was "
                                             "a $RECOVERY lock for dead "
                                             "node %u (%s)!\n",
                                             dead_node, dlm->name);
                                        list_del_init(&lock->list);
                                        dlm_lock_put(lock);
                                        break;
                                }
                        }
                        spin_unlock(&res->spinlock);
                        continue;
                }

                if (res->owner == dead_node) {
                        mlog(0, "found lockres owned by dead node while "
                             "doing recovery for node %u. sending it.\n",
                             dead_node);
                        list_move_tail(&res->recovering, list);
                } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "found UNKNOWN owner while doing recovery "
                             "for node %u. sending it.\n", dead_node);
                        list_move_tail(&res->recovering, list);
                }
        }
        spin_unlock(&dlm->spinlock);
}
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
        int total_locks = 0;
        struct list_head *iter, *queue = &res->granted;
        int i;

        /* the three queues (granted, converting, blocked) are laid out
         * consecutively in the lock resource, so advancing the pointer
         * walks them in turn */
        for (i=0; i<3; i++) {
                list_for_each(iter, queue)
                        total_locks++;
                queue++;
        }
        return total_locks;
}
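
/* A dlm_migratable_lockres message is exactly one page, so a lockres
 * whose queues hold more than DLM_MAX_MIGRATABLE_LOCKS locks has to be
 * split across several messages, all stamped with the same migration
 * cookie (see dlm_send_one_lockres below). */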
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
                                    struct dlm_migratable_lockres *mres,
                                    u8 send_to,
                                    struct dlm_lock_resource *res,
                                    int total_locks)
{
        u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
        int mres_total_locks = be32_to_cpu(mres->total_locks);
        int sz, ret = 0, status = 0;
        u8 orig_flags = mres->flags,
           orig_master = mres->master;

        BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
        if (!mres->num_locks)
                return 0;

        sz = sizeof(struct dlm_migratable_lockres) +
             (mres->num_locks * sizeof(struct dlm_migratable_lock));

        /* add an all-done flag if we reached the last lock */
        orig_flags = mres->flags;
        BUG_ON(total_locks > mres_total_locks);
        if (total_locks == mres_total_locks)
                mres->flags |= DLM_MRES_ALL_DONE;

        /* send it */
        ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
                                 sz, send_to, &status);
        if (ret < 0) {
                /* XXX: negative status is not handled.
                 * this will end up killing this node. */
                mlog_errno(ret);
        } else {
                /* might get an -ENOMEM back here */
                ret = status;
                if (ret < 0) {
                        mlog_errno(ret);

                        if (ret == -EFAULT) {
                                mlog(ML_ERROR, "node %u told me to kill "
                                     "myself!\n", send_to);
                                BUG();
                        }
                }
        }

        /* zero and reinit the message buffer */
        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, mres_total_locks,
                                    mig_cookie, orig_flags, orig_master);
        return ret;
}
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
                                        const char *lockname, int namelen,
                                        int total_locks, u64 cookie,
                                        u8 flags, u8 master)
{
        /* mres here is one full page */
        memset(mres, 0, PAGE_SIZE);
        mres->lockname_len = namelen;
        memcpy(mres->lockname, lockname, namelen);
        mres->num_locks = 0;
        mres->total_locks = cpu_to_be32(total_locks);
        mres->mig_cookie = cpu_to_be64(cookie);
        mres->flags = flags;
        mres->master = master;
}
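
/* Note that total_locks and the cookie are stored big-endian above
 * because the structure crosses the wire; the single-byte flags and
 * master fields need no conversion. */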
/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
                                 struct dlm_migratable_lockres *mres, int queue)
{
        struct dlm_migratable_lock *ml;
        int lock_num = mres->num_locks;

        ml = &(mres->ml[lock_num]);
        ml->cookie = lock->ml.cookie;
        ml->type = lock->ml.type;
        ml->convert_type = lock->ml.convert_type;
        ml->highest_blocked = lock->ml.highest_blocked;
        ml->list = queue;
        if (lock->lksb) {
                ml->flags = lock->lksb->flags;
                /* send our current lvb */
                if (ml->type == LKM_EXMODE ||
                    ml->type == LKM_PRMODE) {
                        /* if it is already set, this had better be a PR
                         * and it has to match */
                        if (!dlm_lvb_is_empty(mres->lvb) &&
                            (ml->type == LKM_EXMODE ||
                             memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
                                mlog(ML_ERROR, "mismatched lvbs!\n");
                                __dlm_print_one_lock_resource(lock->lockres);
                                BUG();
                        }
                        memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
                }
        }
        ml->node = lock->ml.node;
        mres->num_locks++;
        /* we reached the max, send this network message */
        if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
                return 1;
        return 0;
}
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                         struct dlm_migratable_lockres *mres,
                         u8 send_to, u8 flags)
{
        struct list_head *queue, *iter;
        int total_locks, i;
        u64 mig_cookie = 0;
        struct dlm_lock *lock;
        int ret = 0;

        BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        mlog(0, "sending to %u\n", send_to);

        total_locks = dlm_num_locks_in_lockres(res);
        if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
                /* rare, but possible */
                mlog(0, "argh.  lockres has %d locks.  this will "
                     "require more than one network packet to "
                     "migrate\n", total_locks);
                mig_cookie = dlm_get_next_mig_cookie();
        }

        dlm_init_migratable_lockres(mres, res->lockname.name,
                                    res->lockname.len, total_locks,
                                    mig_cookie, flags, res->owner);

        total_locks = 0;
        for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
                queue = dlm_list_idx_to_ptr(res, i);
                list_for_each(iter, queue) {
                        lock = list_entry (iter, struct dlm_lock, list);

                        /* add another lock. */
                        total_locks++;
                        if (!dlm_add_lock_to_array(lock, mres, i))
                                continue;

                        /* this filled the lock message,
                         * we must send it immediately. */
                        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
                                                       res, total_locks);
                        if (ret < 0)
                                goto error;
                }
        }
        /* flush any remaining locks */
        ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
        if (ret < 0)
                goto error;
        return ret;

error:
        mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
             dlm->name, ret);
        if (!dlm_is_host_down(ret))
                BUG();
        mlog(0, "%s: node %u went down while sending %s "
             "lockres %.*s\n", dlm->name, send_to,
             flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
             res->lockname.len, res->lockname.name);
        return ret;
}
/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 *
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_migratable_lockres *mres =
                (struct dlm_migratable_lockres *)msg->buf;
        int ret = 0;
        u8 real_master;
        char *buf = NULL;
        struct dlm_work_item *item = NULL;
        struct dlm_lock_resource *res = NULL;

        if (!dlm_grab(dlm))
                return -EINVAL;

        BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

        real_master = mres->master;
        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* cannot migrate a lockres with no master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
        }

        mlog(0, "%s message received from node %u\n",
             (mres->flags & DLM_MRES_RECOVERY) ?
             "recovery" : "migration", mres->master);
        if (mres->flags & DLM_MRES_ALL_DONE)
                mlog(0, "all done flag.  all lockres data received!\n");

        ret = -ENOMEM;
        buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
        item = kcalloc(1, sizeof(*item), GFP_KERNEL);
        if (!buf || !item)
                goto leave;

        /* lookup the lock to see if we have a secondary queue for this
         * already...  just add the locks in and this will have its owner
         * and RECOVERY flag changed when it completes. */
        res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
        if (res) {
                /* this will get a ref on res */
                /* mark it as recovering/migrating and hash it */
                spin_lock(&res->spinlock);
                if (mres->flags & DLM_MRES_RECOVERY) {
                        res->state |= DLM_LOCK_RES_RECOVERING;
                } else {
                        if (res->state & DLM_LOCK_RES_MIGRATING) {
                                /* this is at least the second
                                 * lockres message */
                                mlog(0, "lock %.*s is already migrating\n",
                                     mres->lockname_len, mres->lockname);
                        } else if (res->state & DLM_LOCK_RES_RECOVERING) {
                                /* caller should BUG */
                                mlog(ML_ERROR, "node is attempting to migrate "
                                     "lock %.*s, but marked as recovering!\n",
                                     mres->lockname_len, mres->lockname);
                                ret = -EFAULT;
                                spin_unlock(&res->spinlock);
                                goto leave;
                        }
                        res->state |= DLM_LOCK_RES_MIGRATING;
                }
                spin_unlock(&res->spinlock);
        } else {
                /* need to allocate, just like if it was
                 * mastered here normally */
                res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
                if (!res)
                        goto leave;

                /* to match the ref that we would have gotten if
                 * dlm_lookup_lockres had succeeded */
                dlm_lockres_get(res);

                /* mark it as recovering/migrating and hash it */
                if (mres->flags & DLM_MRES_RECOVERY)
                        res->state |= DLM_LOCK_RES_RECOVERING;
                else
                        res->state |= DLM_LOCK_RES_MIGRATING;

                spin_lock(&dlm->spinlock);
                __dlm_insert_lockres(dlm, res);
                spin_unlock(&dlm->spinlock);

                /* now that the new lockres is inserted,
                 * make it usable by other processes */
                spin_lock(&res->spinlock);
                res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
                spin_unlock(&res->spinlock);

                /* add an extra ref for just-allocated lockres
                 * otherwise the lockres will be purged immediately */
                dlm_lockres_get(res);
        }

        /* at this point we have allocated everything we need,
         * and we have a hashed lockres with an extra ref and
         * the proper res->state flags. */
        ret = 0;
        if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* migration cannot have an unknown master */
                BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
                mlog(0, "recovery has passed me a lockres with an "
                     "unknown owner.. will need to requery: "
                     "%.*s\n", mres->lockname_len, mres->lockname);
        } else {
                spin_lock(&res->spinlock);
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
                spin_unlock(&res->spinlock);
        }

        /* queue up work for dlm_mig_lockres_worker */
        dlm_grab(dlm);  /* get an extra ref for the work item */
        memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
        dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
        item->u.ml.lockres = res; /* already have a ref */
        item->u.ml.real_master = real_master;
        spin_lock(&dlm->work_lock);
        list_add_tail(&item->list, &dlm->work_list);
        spin_unlock(&dlm->work_lock);
        schedule_work(&dlm->dispatched_work);

leave:
        dlm_put(dlm);
        if (ret < 0) {
                kfree(buf);
                kfree(item);
        }

        return ret;
}
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
        struct dlm_ctxt *dlm;
        struct dlm_migratable_lockres *mres;
        int ret = 0;
        struct dlm_lock_resource *res;
        u8 real_master;

        dlm = item->dlm;
        mres = (struct dlm_migratable_lockres *)data;

        res = item->u.ml.lockres;
        real_master = item->u.ml.real_master;

        if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                /* this case is super-rare. only occurs if
                 * node death happens during migration. */
again:
                ret = dlm_lockres_master_requery(dlm, res, &real_master);
                if (ret < 0) {
                        mlog(0, "dlm_lockres_master_requery ret=%d\n",
                             ret);
                        goto again;
                }
                if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "lockres %.*s not claimed.  "
                             "this node will take it.\n",
                             res->lockname.len, res->lockname.name);
                } else {
                        mlog(0, "master needs to respond to sender "
                             "that node %u still owns %.*s\n",
                             real_master, res->lockname.len,
                             res->lockname.name);
                        /* cannot touch this lockres */
                        goto leave;
                }
        }

        ret = dlm_process_recovery_data(dlm, res, mres);
        if (ret < 0)
                mlog(0, "dlm_process_recovery_data returned %d\n", ret);
        else
                mlog(0, "dlm_process_recovery_data succeeded\n");

        if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
                           (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
                ret = dlm_finish_migration(dlm, res, mres->master);
                if (ret < 0)
                        mlog_errno(ret);
        }

leave:
        kfree(data);
}
int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
                               struct dlm_lock_resource *res, u8 *real_master)
{
        struct dlm_node_iter iter;
        int nodenum;
        int ret = 0;

        *real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

        /* we only reach here if one of the two nodes in a
         * migration died while the migration was in progress.
         * at this point we need to requery the master.  we
         * know that the new_master got as far as creating
         * an mle on at least one node, but we do not know
         * if any nodes had actually cleared the mle and set
         * the master to the new_master.  the old master
         * is supposed to set the owner to UNKNOWN in the
         * event of a new_master death, so the only possible
         * responses that we can get from nodes here are
         * that the master is new_master, or that the master
         * is UNKNOWN.
         * if all nodes come back with UNKNOWN then we know
         * the lock needs remastering here.
         * if any node comes back with a valid master, check
         * to see if that master is the one that we are
         * recovering.  if so, then the new_master died and
         * we need to remaster this lock.  if not, then the
         * new_master survived and that node will respond to
         * other nodes about the owner.
         * if there is an owner, this node needs to dump this
         * lockres and alert the sender that this lockres
         * was rejected. */
        spin_lock(&dlm->spinlock);
        dlm_node_iter_init(dlm->domain_map, &iter);
        spin_unlock(&dlm->spinlock);

        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                /* do not send to self */
                if (nodenum == dlm->node_num)
                        continue;
                ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
                if (ret < 0) {
                        mlog_errno(ret);
                        if (!dlm_is_host_down(ret))
                                BUG();
                        /* host is down, so answer for that node would be
                         * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
                }
                if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        mlog(0, "lock master is %u\n", *real_master);
                        break;
                }
        }
        return ret;
}
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                          u8 nodenum, u8 *real_master)
{
        int ret;
        struct dlm_master_requery req;
        int status = DLM_LOCK_RES_OWNER_UNKNOWN;

        memset(&req, 0, sizeof(req));
        req.node_idx = dlm->node_num;
        req.namelen = res->lockname.len;
        memcpy(req.name, res->lockname.name, res->lockname.len);

        ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
                                 &req, sizeof(req), nodenum, &status);
        /* XXX: negative status not handled properly here. */
        if (ret < 0)
                mlog_errno(ret);
        else {
                BUG_ON(status < 0);
                BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
                *real_master = (u8) (status & 0xff);
                mlog(0, "node %u responded to master requery with %u\n",
                     nodenum, *real_master);
                ret = 0;
        }
        return ret;
}
/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        unsigned int hash;
        int master = DLM_LOCK_RES_OWNER_UNKNOWN;
        u32 flags = DLM_ASSERT_MASTER_REQUERY;

        if (!dlm_grab(dlm)) {
                /* since the domain has gone away on this
                 * node, the proper response is UNKNOWN */
                return master;
        }

        hash = dlm_lockid_hash(req->name, req->namelen);

        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
        if (res) {
                spin_lock(&res->spinlock);
                master = res->owner;
                if (master == dlm->node_num) {
                        int ret = dlm_dispatch_assert_master(dlm, res,
                                                             0, 0, flags);
                        if (ret < 0) {
                                mlog_errno(-ENOMEM);
                                /* retry!? */
                                BUG();
                        }
                }
                spin_unlock(&res->spinlock);
        }
        spin_unlock(&dlm->spinlock);

        dlm_put(dlm);
        return master;
}
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
        struct list_head *ret;

        BUG_ON(list_num < 0);
        BUG_ON(list_num > 2);
        ret = &(res->granted);
        ret += list_num;
        return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
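
/* The ml->node == dlm->node_num branch in dlm_process_recovery_data
 * below is where this rule is applied: a migrated copy of one of our
 * own locks only causes the existing local lock to be reordered on its
 * queue, never overwritten. */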
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_migratable_lockres *mres)
{
        struct dlm_migratable_lock *ml;
        struct list_head *queue;
        struct dlm_lock *newlock = NULL;
        struct dlm_lockstatus *lksb = NULL;
        int ret = 0;
        int i, bad;
        struct list_head *iter;
        struct dlm_lock *lock = NULL;

        mlog(0, "running %d locks for this lockres\n", mres->num_locks);
        for (i=0; i<mres->num_locks; i++) {
                ml = &(mres->ml[i]);
                BUG_ON(ml->highest_blocked != LKM_IVMODE);
                newlock = NULL;
                lksb = NULL;

                queue = dlm_list_num_to_pointer(res, ml->list);

                /* if the lock is for the local node it needs to
                 * be moved to the proper location within the queue.
                 * do not allocate a new lock structure. */
                if (ml->node == dlm->node_num) {
                        /* MIGRATION ONLY! */
                        BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

                        spin_lock(&res->spinlock);
                        list_for_each(iter, queue) {
                                lock = list_entry (iter, struct dlm_lock, list);
                                if (lock->ml.cookie != ml->cookie)
                                        lock = NULL;
                                else
                                        break;
                        }

                        /* lock is always created locally first, and
                         * destroyed locally last.  it must be on the list */
                        if (!lock) {
                                u64 c = ml->cookie;
                                mlog(ML_ERROR, "could not find local lock "
                                     "with cookie %u:%llu!\n",
                                     dlm_get_lock_cookie_node(c),
                                     dlm_get_lock_cookie_seq(c));
                                BUG();
                        }
                        BUG_ON(lock->ml.node != ml->node);

                        /* see NOTE above about why we do not update
                         * to match the master here */

                        /* move the lock to its proper place */
                        /* do not alter lock refcount.  switching lists. */
                        list_move_tail(&lock->list, queue);
                        spin_unlock(&res->spinlock);

                        mlog(0, "just reordered a local lock!\n");
                        continue;
                }

                /* lock is for another node. */
                newlock = dlm_new_lock(ml->type, ml->node,
                                       be64_to_cpu(ml->cookie), NULL);
                if (!newlock) {
                        ret = -ENOMEM;
                        goto leave;
                }
                lksb = newlock->lksb;
                dlm_lock_attach_lockres(newlock, res);

                if (ml->convert_type != LKM_IVMODE) {
                        BUG_ON(queue != &res->converting);
                        newlock->ml.convert_type = ml->convert_type;
                }
                lksb->flags |= (ml->flags &
                                (DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

                if (ml->type == LKM_NLMODE)
                        goto skip_lvb;

                if (!dlm_lvb_is_empty(mres->lvb)) {
                        if (lksb->flags & DLM_LKSB_PUT_LVB) {
                                /* other node was trying to update
                                 * lvb when node died.  recreate the
                                 * lksb with the updated lvb. */
                                memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
                                /* the lock resource lvb update must happen
                                 * NOW, before the spinlock is dropped.
                                 * we no longer wait for the AST to update
                                 * the lvb. */
                                memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                        } else {
                                /* otherwise, the node is sending its
                                 * most recent valid lvb info */
                                BUG_ON(ml->type != LKM_EXMODE &&
                                       ml->type != LKM_PRMODE);
                                if (!dlm_lvb_is_empty(res->lvb) &&
                                    (ml->type == LKM_EXMODE ||
                                     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
                                        int i;
                                        mlog(ML_ERROR, "%s:%.*s: received bad "
                                             "lvb! type=%d\n", dlm->name,
                                             res->lockname.len,
                                             res->lockname.name, ml->type);
                                        printk("lockres lvb=[");
                                        for (i=0; i<DLM_LVB_LEN; i++)
                                                printk("%02x", res->lvb[i]);
                                        printk("]\nmigrated lvb=[");
                                        for (i=0; i<DLM_LVB_LEN; i++)
                                                printk("%02x", mres->lvb[i]);
                                        printk("]\n");
                                        dlm_print_one_lock_resource(res);
                                        BUG();
                                }
                                memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
                        }
                }
skip_lvb:

                /* NOTE:
                 * wrt lock queue ordering and recovery:
                 *    1. order of locks on granted queue is
                 *       preserved exactly.
                 *    2. order of locks on converting queue is
                 *       LOST with the node death.  sorry charlie.
                 *    3. order of locks on the blocked queue is
                 *       also LOST.
                 * order of locks does not affect integrity, it
                 * just means that a lock request may get pushed
                 * back in line as a result of the node death.
                 * also note that for a given node the lock order
                 * for its secondary queue locks is preserved
                 * relative to each other, but clearly *not*
                 * preserved relative to locks from other nodes.
                 */
                bad = 0;
                spin_lock(&res->spinlock);
                list_for_each_entry(lock, queue, list) {
                        if (lock->ml.cookie == ml->cookie) {
                                u64 c = lock->ml.cookie;
                                mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
                                     "exists on this lockres!\n", dlm->name,
                                     res->lockname.len, res->lockname.name,
                                     dlm_get_lock_cookie_node(c),
                                     dlm_get_lock_cookie_seq(c));

                                mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
                                     "node=%u, cookie=%u:%llu, queue=%d\n",
                                     ml->type, ml->convert_type, ml->node,
                                     dlm_get_lock_cookie_node(ml->cookie),
                                     dlm_get_lock_cookie_seq(ml->cookie),
                                     ml->list);

                                __dlm_print_one_lock_resource(res);
                                bad = 1;
                                break;
                        }
                }
                if (!bad) {
                        dlm_lock_get(newlock);
                        list_add_tail(&newlock->list, queue);
                }
                spin_unlock(&res->spinlock);
        }
        mlog(0, "done running all the locks\n");

leave:
        if (ret < 0) {
                mlog_errno(ret);
                if (newlock)
                        dlm_lock_put(newlock);
        }

        return ret;
}
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res)
{
        int i;
        struct list_head *queue, *iter, *iter2;
        struct dlm_lock *lock;

        res->state |= DLM_LOCK_RES_RECOVERING;
        if (!list_empty(&res->recovering)) {
                mlog(0,
                     "Recovering res %s:%.*s, is already on recovery list!\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                list_del_init(&res->recovering);
        }
        /* We need to hold a reference while on the recovery list */
        dlm_lockres_get(res);
        list_add_tail(&res->recovering, &dlm->reco.resources);

        /* find any pending locks and put them back on proper list */
        for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
                queue = dlm_list_idx_to_ptr(res, i);
                list_for_each_safe(iter, iter2, queue) {
                        lock = list_entry (iter, struct dlm_lock, list);

                        if (lock->convert_pending) {
                                /* move converting lock back to granted */
                                BUG_ON(i != DLM_CONVERTING_LIST);
                                mlog(0, "node died with convert pending "
                                     "on %.*s. move back to granted list.\n",
                                     res->lockname.len, res->lockname.name);
                                dlm_revert_pending_convert(res, lock);
                                lock->convert_pending = 0;
                        } else if (lock->lock_pending) {
                                /* remove pending lock requests completely */
                                BUG_ON(i != DLM_BLOCKED_LIST);
                                mlog(0, "node died with lock pending "
                                     "on %.*s. remove from blocked list and skip.\n",
                                     res->lockname.len, res->lockname.name);
                                /* lock will be floating until ref in
                                 * dlmlock_remote is freed after the network
                                 * call returns.  ok for it to not be on any
                                 * list since no ast can be called
                                 * (the master is dead). */
                                dlm_revert_pending_lock(res, lock);
                                lock->lock_pending = 0;
                        } else if (lock->unlock_pending) {
                                /* if an unlock was in progress, treat as
                                 * if this had completed successfully
                                 * before sending this lock state to the
                                 * new master.  note that the dlm_unlock
                                 * call is still responsible for calling
                                 * the unlockast.  that will happen after
                                 * the network call times out.  for now,
                                 * just move lists to prepare the new
                                 * recovery master. */
                                BUG_ON(i != DLM_GRANTED_LIST);
                                mlog(0, "node died with unlock pending "
                                     "on %.*s. remove from blocked list and skip.\n",
                                     res->lockname.len, res->lockname.name);
                                dlm_commit_pending_unlock(res, lock);
                                lock->unlock_pending = 0;
                        } else if (lock->cancel_pending) {
                                /* if a cancel was in progress, treat as
                                 * if this had completed successfully
                                 * before sending this lock state to the
                                 * new master */
                                BUG_ON(i != DLM_CONVERTING_LIST);
                                mlog(0, "node died with cancel pending "
                                     "on %.*s. move back to granted list.\n",
                                     res->lockname.len, res->lockname.name);
                                dlm_commit_pending_cancel(res, lock);
                                lock->cancel_pending = 0;
                        }
                }
        }
}
/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
                                              u8 dead_node, u8 new_master)
{
        int i;
        struct list_head *iter, *iter2;
        struct hlist_node *hash_iter;
        struct hlist_head *bucket;
        struct dlm_lock_resource *res;

        assert_spin_locked(&dlm->spinlock);

        list_for_each_safe(iter, iter2, &dlm->reco.resources) {
                res = list_entry (iter, struct dlm_lock_resource, recovering);
                if (res->owner == dead_node) {
                        list_del_init(&res->recovering);
                        spin_lock(&res->spinlock);
                        dlm_change_lockres_owner(dlm, res, new_master);
                        res->state &= ~DLM_LOCK_RES_RECOVERING;
                        if (!__dlm_lockres_unused(res))
                                __dlm_dirty_lockres(dlm, res);
                        spin_unlock(&res->spinlock);
                        wake_up(&res->wq);
                        dlm_lockres_put(res);
                }
        }

        /* this will become unnecessary eventually, but
         * for now we need to run the whole hash, clear
         * the RECOVERING state and set the owner
         * if necessary */
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
                bucket = dlm_lockres_hash(dlm, i);
                hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
                        if (res->state & DLM_LOCK_RES_RECOVERING) {
                                if (res->owner == dead_node) {
                                        mlog(0, "(this=%u) res %.*s owner=%u "
                                             "was not on recovering list, but "
                                             "clearing state anyway\n",
                                             dlm->node_num, res->lockname.len,
                                             res->lockname.name, new_master);
                                } else if (res->owner == dlm->node_num) {
                                        mlog(0, "(this=%u) res %.*s owner=%u "
                                             "was not on recovering list, "
                                             "owner is THIS node, clearing\n",
                                             dlm->node_num, res->lockname.len,
                                             res->lockname.name, new_master);
                                } else
                                        continue;

                                if (!list_empty(&res->recovering)) {
                                        mlog(0, "%s:%.*s: lockres was "
                                             "marked RECOVERING, owner=%u\n",
                                             dlm->name, res->lockname.len,
                                             res->lockname.name, res->owner);
                                        list_del_init(&res->recovering);
                                        dlm_lockres_put(res);
                                }
                                spin_lock(&res->spinlock);
                                dlm_change_lockres_owner(dlm, res, new_master);
                                res->state &= ~DLM_LOCK_RES_RECOVERING;
                                if (!__dlm_lockres_unused(res))
                                        __dlm_dirty_lockres(dlm, res);
                                spin_unlock(&res->spinlock);
                                wake_up(&res->wq);
                        }
                }
        }
}
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
        if (local) {
                if (lock->ml.type != LKM_EXMODE &&
                    lock->ml.type != LKM_PRMODE)
                        return 1;
        } else if (lock->ml.type == LKM_EXMODE)
                return 1;
        return 0;
}
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
                               struct dlm_lock_resource *res, u8 dead_node)
{
        struct list_head *iter, *queue;
        struct dlm_lock *lock;
        int blank_lvb = 0, local = 0;
        int i;
        u8 search_node;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);

        if (res->owner == dlm->node_num)
                /* if this node owned the lockres, and if the dead node
                 * had an EX when he died, blank out the lvb */
                search_node = dead_node;
        else {
                /* if this is a secondary lockres, and we had no EX or PR
                 * locks granted, we can no longer trust the lvb */
                search_node = dlm->node_num;
                local = 1;  /* check local state for valid lvb */
        }

        for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
                queue = dlm_list_idx_to_ptr(res, i);
                list_for_each(iter, queue) {
                        lock = list_entry (iter, struct dlm_lock, list);
                        if (lock->ml.node == search_node) {
                                if (dlm_lvb_needs_invalidation(lock, local)) {
                                        /* zero the lksb lvb and lockres lvb */
                                        blank_lvb = 1;
                                        memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
                                }
                        }
                }
        }

        if (blank_lvb) {
                mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
                     res->lockname.len, res->lockname.name, dead_node);
                memset(res->lvb, 0, DLM_LVB_LEN);
        }
}
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res, u8 dead_node)
{
        struct list_head *iter, *tmpiter;
        struct dlm_lock *lock;

        /* this node is the lockres master:
         * 1) remove any stale locks for the dead node
         * 2) if the dead node had an EX when he died, blank out the lvb
         */
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&res->spinlock);

        /* TODO: check pending_asts, pending_basts here */
        list_for_each_safe(iter, tmpiter, &res->granted) {
                lock = list_entry (iter, struct dlm_lock, list);
                if (lock->ml.node == dead_node) {
                        list_del_init(&lock->list);
                        dlm_lock_put(lock);
                }
        }
        list_for_each_safe(iter, tmpiter, &res->converting) {
                lock = list_entry (iter, struct dlm_lock, list);
                if (lock->ml.node == dead_node) {
                        list_del_init(&lock->list);
                        dlm_lock_put(lock);
                }
        }
        list_for_each_safe(iter, tmpiter, &res->blocked) {
                lock = list_entry (iter, struct dlm_lock, list);
                if (lock->ml.node == dead_node) {
                        list_del_init(&lock->list);
                        dlm_lock_put(lock);
                }
        }

        /* do not kick thread yet */
        __dlm_dirty_lockres(dlm, res);
}
/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 *
 */
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct hlist_node *iter;
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node)
				dlm_move_lockres_to_recovery_list(dlm, res);
			else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}
}
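/* Core node-down handling, shared by the heartbeat callback and by
 * dlm_begin_reco_handler (which may learn of a death from the recovery
 * master before heartbeat notices it).  Caller must hold dlm->spinlock,
 * hence the double-underscore name. */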
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}
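/* Heartbeat up/down callbacks.  Both pin the domain with
 * dlm_grab()/dlm_put() around the update, so a callback racing with
 * domain teardown is simply ignored. */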
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}
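/* ASTs for the $RECOVERY lock.  These exist only to satisfy the
 * dlmlock()/dlmunlock() interfaces; all of the interesting state
 * changes happen in dlm_pick_recovery_master below, so they just log. */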
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}
/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master. wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(0, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;
			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     " returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		} else if (ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			/* TODO Look into replacing msleep with cond_resched() */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}
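/* Handler for DLM_BEGIN_RECO_MSG.  Returns EAGAIN (positive, matching
 * the check in dlm_send_begin_reco_message above) while this node is
 * still in finalize state; otherwise records the new recovery master
 * and dead node, and forces local node-down cleanup if heartbeat has
 * not yet delivered the death. */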
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		return EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}
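/* Recovery finalization runs in two stages: finalize1 marks each node
 * DLM_RECO_STATE_FINALIZE, finalize2 clears it and resets the recovery
 * state (see dlm_finalize_reco_handler below), so every node sees
 * stage 1 before any node acts on stage 2. */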
#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog_errno(ret);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}
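/* Handler for DLM_FINALIZE_RECO_MSG from the recovery master.
 * Stage 1 installs the recovered lockres owners and sets
 * DLM_RECO_STATE_FINALIZE; stage 2 clears that state and resets
 * recovery so a new dead node can be handled. */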
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
		case 1:
			dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
			if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
				mlog(ML_ERROR, "%s: received finalize1 from "
				     "new master %u for dead node %u, but "
				     "this node has already received it!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);
			break;
		case 2:
			if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
				mlog(ML_ERROR, "%s: received finalize2 from "
				     "new master %u for dead node %u, but "
				     "this node did not have finalize1!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);
			dlm_reset_recovery(dlm);
			dlm_kick_recovery_thread(dlm);
			break;
		default:
			BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);