4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
28 #include <linux/drbd_limits.h>
30 #include "drbd_protocol.h"
33 struct after_state_chg_work
{
37 enum chg_state_flags flags
;
38 struct completion
*done
;
/* Warnings sanitize_state() may report back to its caller.
 * NO_WARNING and ABORTED_RESYNC were dropped by the mangled paste;
 * both are referenced by print_sanitize_warnings() below. */
enum sanitize_state_warnings {
	NO_WARNING,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};
/* Forward declarations for the static helpers defined further down. */
static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_device *device, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state,
						   struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
				       enum sanitize_state_warnings *warn);
59 static inline bool is_susp(union drbd_state s
)
61 return s
.susp
|| s
.susp_nod
|| s
.susp_fen
;
64 bool conn_all_vols_unconf(struct drbd_connection
*connection
)
66 struct drbd_peer_device
*peer_device
;
71 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
72 struct drbd_device
*device
= peer_device
->device
;
73 if (device
->state
.disk
!= D_DISKLESS
||
74 device
->state
.conn
!= C_STANDALONE
||
75 device
->state
.role
!= R_SECONDARY
) {
85 /* Unfortunately the states where not correctly ordered, when
86 they where defined. therefore can not use max_t() here. */
87 static enum drbd_role
max_role(enum drbd_role role1
, enum drbd_role role2
)
89 if (role1
== R_PRIMARY
|| role2
== R_PRIMARY
)
91 if (role1
== R_SECONDARY
|| role2
== R_SECONDARY
)
95 static enum drbd_role
min_role(enum drbd_role role1
, enum drbd_role role2
)
97 if (role1
== R_UNKNOWN
|| role2
== R_UNKNOWN
)
99 if (role1
== R_SECONDARY
|| role2
== R_SECONDARY
)
104 enum drbd_role
conn_highest_role(struct drbd_connection
*connection
)
106 enum drbd_role role
= R_UNKNOWN
;
107 struct drbd_peer_device
*peer_device
;
111 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
112 struct drbd_device
*device
= peer_device
->device
;
113 role
= max_role(role
, device
->state
.role
);
120 enum drbd_role
conn_highest_peer(struct drbd_connection
*connection
)
122 enum drbd_role peer
= R_UNKNOWN
;
123 struct drbd_peer_device
*peer_device
;
127 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
128 struct drbd_device
*device
= peer_device
->device
;
129 peer
= max_role(peer
, device
->state
.peer
);
136 enum drbd_disk_state
conn_highest_disk(struct drbd_connection
*connection
)
138 enum drbd_disk_state ds
= D_DISKLESS
;
139 struct drbd_peer_device
*peer_device
;
143 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
144 struct drbd_device
*device
= peer_device
->device
;
145 ds
= max_t(enum drbd_disk_state
, ds
, device
->state
.disk
);
152 enum drbd_disk_state
conn_lowest_disk(struct drbd_connection
*connection
)
154 enum drbd_disk_state ds
= D_MASK
;
155 struct drbd_peer_device
*peer_device
;
159 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
160 struct drbd_device
*device
= peer_device
->device
;
161 ds
= min_t(enum drbd_disk_state
, ds
, device
->state
.disk
);
168 enum drbd_disk_state
conn_highest_pdsk(struct drbd_connection
*connection
)
170 enum drbd_disk_state ds
= D_DISKLESS
;
171 struct drbd_peer_device
*peer_device
;
175 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
176 struct drbd_device
*device
= peer_device
->device
;
177 ds
= max_t(enum drbd_disk_state
, ds
, device
->state
.pdsk
);
184 enum drbd_conns
conn_lowest_conn(struct drbd_connection
*connection
)
186 enum drbd_conns conn
= C_MASK
;
187 struct drbd_peer_device
*peer_device
;
191 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
192 struct drbd_device
*device
= peer_device
->device
;
193 conn
= min_t(enum drbd_conns
, conn
, device
->state
.conn
);
200 static bool no_peer_wf_report_params(struct drbd_connection
*connection
)
202 struct drbd_peer_device
*peer_device
;
207 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
208 if (peer_device
->device
->state
.conn
== C_WF_REPORT_PARAMS
) {
219 * cl_wide_st_chg() - true if the state change is a cluster wide one
220 * @device: DRBD device.
221 * @os: old (current) state.
222 * @ns: new (wanted) state.
224 static int cl_wide_st_chg(struct drbd_device
*device
,
225 union drbd_state os
, union drbd_state ns
)
227 return (os
.conn
>= C_CONNECTED
&& ns
.conn
>= C_CONNECTED
&&
228 ((os
.role
!= R_PRIMARY
&& ns
.role
== R_PRIMARY
) ||
229 (os
.conn
!= C_STARTING_SYNC_T
&& ns
.conn
== C_STARTING_SYNC_T
) ||
230 (os
.conn
!= C_STARTING_SYNC_S
&& ns
.conn
== C_STARTING_SYNC_S
) ||
231 (os
.disk
!= D_FAILED
&& ns
.disk
== D_FAILED
))) ||
232 (os
.conn
>= C_CONNECTED
&& ns
.conn
== C_DISCONNECTING
) ||
233 (os
.conn
== C_CONNECTED
&& ns
.conn
== C_VERIFY_S
) ||
234 (os
.conn
== C_CONNECTED
&& ns
.conn
== C_WF_REPORT_PARAMS
);
237 static union drbd_state
238 apply_mask_val(union drbd_state os
, union drbd_state mask
, union drbd_state val
)
241 ns
.i
= (os
.i
& ~mask
.i
) | val
.i
;
246 drbd_change_state(struct drbd_device
*device
, enum chg_state_flags f
,
247 union drbd_state mask
, union drbd_state val
)
251 enum drbd_state_rv rv
;
253 spin_lock_irqsave(&device
->resource
->req_lock
, flags
);
254 ns
= apply_mask_val(drbd_read_state(device
), mask
, val
);
255 rv
= _drbd_set_state(device
, ns
, f
, NULL
);
256 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
262 * drbd_force_state() - Impose a change which happens outside our control on our state
263 * @device: DRBD device.
264 * @mask: mask of state bits to change.
265 * @val: value of new state bits.
267 void drbd_force_state(struct drbd_device
*device
,
268 union drbd_state mask
, union drbd_state val
)
270 drbd_change_state(device
, CS_HARD
, mask
, val
);
273 static enum drbd_state_rv
274 _req_st_cond(struct drbd_device
*device
, union drbd_state mask
,
275 union drbd_state val
)
277 union drbd_state os
, ns
;
279 enum drbd_state_rv rv
;
281 if (test_and_clear_bit(CL_ST_CHG_SUCCESS
, &device
->flags
))
282 return SS_CW_SUCCESS
;
284 if (test_and_clear_bit(CL_ST_CHG_FAIL
, &device
->flags
))
285 return SS_CW_FAILED_BY_PEER
;
287 spin_lock_irqsave(&device
->resource
->req_lock
, flags
);
288 os
= drbd_read_state(device
);
289 ns
= sanitize_state(device
, apply_mask_val(os
, mask
, val
), NULL
);
290 rv
= is_valid_transition(os
, ns
);
291 if (rv
>= SS_SUCCESS
)
292 rv
= SS_UNKNOWN_ERROR
; /* cont waiting, otherwise fail. */
294 if (!cl_wide_st_chg(device
, os
, ns
))
296 if (rv
== SS_UNKNOWN_ERROR
) {
297 rv
= is_valid_state(device
, ns
);
298 if (rv
>= SS_SUCCESS
) {
299 rv
= is_valid_soft_transition(os
, ns
, first_peer_device(device
)->connection
);
300 if (rv
>= SS_SUCCESS
)
301 rv
= SS_UNKNOWN_ERROR
; /* cont waiting, otherwise fail. */
304 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
310 * drbd_req_state() - Perform an eventually cluster wide state change
311 * @device: DRBD device.
312 * @mask: mask of state bits to change.
313 * @val: value of new state bits.
316 * Should not be called directly, use drbd_request_state() or
317 * _drbd_request_state().
319 static enum drbd_state_rv
320 drbd_req_state(struct drbd_device
*device
, union drbd_state mask
,
321 union drbd_state val
, enum chg_state_flags f
)
323 struct completion done
;
325 union drbd_state os
, ns
;
326 enum drbd_state_rv rv
;
328 init_completion(&done
);
330 if (f
& CS_SERIALIZE
)
331 mutex_lock(device
->state_mutex
);
333 spin_lock_irqsave(&device
->resource
->req_lock
, flags
);
334 os
= drbd_read_state(device
);
335 ns
= sanitize_state(device
, apply_mask_val(os
, mask
, val
), NULL
);
336 rv
= is_valid_transition(os
, ns
);
337 if (rv
< SS_SUCCESS
) {
338 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
342 if (cl_wide_st_chg(device
, os
, ns
)) {
343 rv
= is_valid_state(device
, ns
);
344 if (rv
== SS_SUCCESS
)
345 rv
= is_valid_soft_transition(os
, ns
, first_peer_device(device
)->connection
);
346 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
348 if (rv
< SS_SUCCESS
) {
350 print_st_err(device
, os
, ns
, rv
);
354 if (drbd_send_state_req(device
, mask
, val
)) {
355 rv
= SS_CW_FAILED_BY_PEER
;
357 print_st_err(device
, os
, ns
, rv
);
361 wait_event(device
->state_wait
,
362 (rv
= _req_st_cond(device
, mask
, val
)));
364 if (rv
< SS_SUCCESS
) {
366 print_st_err(device
, os
, ns
, rv
);
369 spin_lock_irqsave(&device
->resource
->req_lock
, flags
);
370 ns
= apply_mask_val(drbd_read_state(device
), mask
, val
);
371 rv
= _drbd_set_state(device
, ns
, f
, &done
);
373 rv
= _drbd_set_state(device
, ns
, f
, &done
);
376 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
378 if (f
& CS_WAIT_COMPLETE
&& rv
== SS_SUCCESS
) {
379 D_ASSERT(device
, current
!= first_peer_device(device
)->connection
->worker
.task
);
380 wait_for_completion(&done
);
384 if (f
& CS_SERIALIZE
)
385 mutex_unlock(device
->state_mutex
);
391 * _drbd_request_state() - Request a state change (with flags)
392 * @device: DRBD device.
393 * @mask: mask of state bits to change.
394 * @val: value of new state bits.
397 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
398 * flag, or when logging of failed state change requests is not desired.
401 _drbd_request_state(struct drbd_device
*device
, union drbd_state mask
,
402 union drbd_state val
, enum chg_state_flags f
)
404 enum drbd_state_rv rv
;
406 wait_event(device
->state_wait
,
407 (rv
= drbd_req_state(device
, mask
, val
, f
)) != SS_IN_TRANSIENT_STATE
);
412 static void print_st(struct drbd_device
*device
, char *name
, union drbd_state ns
)
414 drbd_err(device
, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
416 drbd_conn_str(ns
.conn
),
417 drbd_role_str(ns
.role
),
418 drbd_role_str(ns
.peer
),
419 drbd_disk_str(ns
.disk
),
420 drbd_disk_str(ns
.pdsk
),
421 is_susp(ns
) ? 's' : 'r',
422 ns
.aftr_isp
? 'a' : '-',
423 ns
.peer_isp
? 'p' : '-',
424 ns
.user_isp
? 'u' : '-',
425 ns
.susp_fen
? 'F' : '-',
426 ns
.susp_nod
? 'N' : '-'
430 void print_st_err(struct drbd_device
*device
, union drbd_state os
,
431 union drbd_state ns
, enum drbd_state_rv err
)
433 if (err
== SS_IN_TRANSIENT_STATE
)
435 drbd_err(device
, "State change failed: %s\n", drbd_set_st_err_str(err
));
436 print_st(device
, " state", os
);
437 print_st(device
, "wanted", ns
);
440 static long print_state_change(char *pb
, union drbd_state os
, union drbd_state ns
,
441 enum chg_state_flags flags
)
447 if (ns
.role
!= os
.role
&& flags
& CS_DC_ROLE
)
448 pbp
+= sprintf(pbp
, "role( %s -> %s ) ",
449 drbd_role_str(os
.role
),
450 drbd_role_str(ns
.role
));
451 if (ns
.peer
!= os
.peer
&& flags
& CS_DC_PEER
)
452 pbp
+= sprintf(pbp
, "peer( %s -> %s ) ",
453 drbd_role_str(os
.peer
),
454 drbd_role_str(ns
.peer
));
455 if (ns
.conn
!= os
.conn
&& flags
& CS_DC_CONN
)
456 pbp
+= sprintf(pbp
, "conn( %s -> %s ) ",
457 drbd_conn_str(os
.conn
),
458 drbd_conn_str(ns
.conn
));
459 if (ns
.disk
!= os
.disk
&& flags
& CS_DC_DISK
)
460 pbp
+= sprintf(pbp
, "disk( %s -> %s ) ",
461 drbd_disk_str(os
.disk
),
462 drbd_disk_str(ns
.disk
));
463 if (ns
.pdsk
!= os
.pdsk
&& flags
& CS_DC_PDSK
)
464 pbp
+= sprintf(pbp
, "pdsk( %s -> %s ) ",
465 drbd_disk_str(os
.pdsk
),
466 drbd_disk_str(ns
.pdsk
));
471 static void drbd_pr_state_change(struct drbd_device
*device
, union drbd_state os
, union drbd_state ns
,
472 enum chg_state_flags flags
)
477 pbp
+= print_state_change(pbp
, os
, ns
, flags
^ CS_DC_MASK
);
479 if (ns
.aftr_isp
!= os
.aftr_isp
)
480 pbp
+= sprintf(pbp
, "aftr_isp( %d -> %d ) ",
483 if (ns
.peer_isp
!= os
.peer_isp
)
484 pbp
+= sprintf(pbp
, "peer_isp( %d -> %d ) ",
487 if (ns
.user_isp
!= os
.user_isp
)
488 pbp
+= sprintf(pbp
, "user_isp( %d -> %d ) ",
493 drbd_info(device
, "%s\n", pb
);
496 static void conn_pr_state_change(struct drbd_connection
*connection
, union drbd_state os
, union drbd_state ns
,
497 enum chg_state_flags flags
)
502 pbp
+= print_state_change(pbp
, os
, ns
, flags
);
504 if (is_susp(ns
) != is_susp(os
) && flags
& CS_DC_SUSP
)
505 pbp
+= sprintf(pbp
, "susp( %d -> %d ) ",
510 drbd_info(connection
, "%s\n", pb
);
515 * is_valid_state() - Returns an SS_ error code if ns is not valid
516 * @device: DRBD device.
517 * @ns: State to consider.
519 static enum drbd_state_rv
520 is_valid_state(struct drbd_device
*device
, union drbd_state ns
)
522 /* See drbd_state_sw_errors in drbd_strings.c */
524 enum drbd_fencing_p fp
;
525 enum drbd_state_rv rv
= SS_SUCCESS
;
530 if (get_ldev(device
)) {
531 fp
= rcu_dereference(device
->ldev
->disk_conf
)->fencing
;
535 nc
= rcu_dereference(first_peer_device(device
)->connection
->net_conf
);
537 if (!nc
->two_primaries
&& ns
.role
== R_PRIMARY
) {
538 if (ns
.peer
== R_PRIMARY
)
539 rv
= SS_TWO_PRIMARIES
;
540 else if (conn_highest_peer(first_peer_device(device
)->connection
) == R_PRIMARY
)
541 rv
= SS_O_VOL_PEER_PRI
;
546 /* already found a reason to abort */;
547 else if (ns
.role
== R_SECONDARY
&& device
->open_cnt
)
548 rv
= SS_DEVICE_IN_USE
;
550 else if (ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.disk
< D_UP_TO_DATE
)
551 rv
= SS_NO_UP_TO_DATE_DISK
;
553 else if (fp
>= FP_RESOURCE
&&
554 ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.pdsk
>= D_UNKNOWN
)
557 else if (ns
.role
== R_PRIMARY
&& ns
.disk
<= D_INCONSISTENT
&& ns
.pdsk
<= D_INCONSISTENT
)
558 rv
= SS_NO_UP_TO_DATE_DISK
;
560 else if (ns
.conn
> C_CONNECTED
&& ns
.disk
< D_INCONSISTENT
)
561 rv
= SS_NO_LOCAL_DISK
;
563 else if (ns
.conn
> C_CONNECTED
&& ns
.pdsk
< D_INCONSISTENT
)
564 rv
= SS_NO_REMOTE_DISK
;
566 else if (ns
.conn
> C_CONNECTED
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
)
567 rv
= SS_NO_UP_TO_DATE_DISK
;
569 else if ((ns
.conn
== C_CONNECTED
||
570 ns
.conn
== C_WF_BITMAP_S
||
571 ns
.conn
== C_SYNC_SOURCE
||
572 ns
.conn
== C_PAUSED_SYNC_S
) &&
573 ns
.disk
== D_OUTDATED
)
574 rv
= SS_CONNECTED_OUTDATES
;
576 else if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
577 (nc
->verify_alg
[0] == 0))
578 rv
= SS_NO_VERIFY_ALG
;
580 else if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
581 first_peer_device(device
)->connection
->agreed_pro_version
< 88)
582 rv
= SS_NOT_SUPPORTED
;
584 else if (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
)
585 rv
= SS_NO_UP_TO_DATE_DISK
;
587 else if ((ns
.conn
== C_STARTING_SYNC_S
|| ns
.conn
== C_STARTING_SYNC_T
) &&
588 ns
.pdsk
== D_UNKNOWN
)
589 rv
= SS_NEED_CONNECTION
;
591 else if (ns
.conn
>= C_CONNECTED
&& ns
.pdsk
== D_UNKNOWN
)
592 rv
= SS_CONNECTED_OUTDATES
;
600 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
601 * This function limits state transitions that may be declined by DRBD. I.e.
602 * user requests (aka soft transitions).
603 * @device: DRBD device.
607 static enum drbd_state_rv
608 is_valid_soft_transition(union drbd_state os
, union drbd_state ns
, struct drbd_connection
*connection
)
610 enum drbd_state_rv rv
= SS_SUCCESS
;
612 if ((ns
.conn
== C_STARTING_SYNC_T
|| ns
.conn
== C_STARTING_SYNC_S
) &&
613 os
.conn
> C_CONNECTED
)
614 rv
= SS_RESYNC_RUNNING
;
616 if (ns
.conn
== C_DISCONNECTING
&& os
.conn
== C_STANDALONE
)
617 rv
= SS_ALREADY_STANDALONE
;
619 if (ns
.disk
> D_ATTACHING
&& os
.disk
== D_DISKLESS
)
622 if (ns
.conn
== C_WF_CONNECTION
&& os
.conn
< C_UNCONNECTED
)
623 rv
= SS_NO_NET_CONFIG
;
625 if (ns
.disk
== D_OUTDATED
&& os
.disk
< D_OUTDATED
&& os
.disk
!= D_ATTACHING
)
626 rv
= SS_LOWER_THAN_OUTDATED
;
628 if (ns
.conn
== C_DISCONNECTING
&& os
.conn
== C_UNCONNECTED
)
629 rv
= SS_IN_TRANSIENT_STATE
;
631 /* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
632 rv = SS_IN_TRANSIENT_STATE; */
634 /* While establishing a connection only allow cstate to change.
635 Delay/refuse role changes, detach attach etc... */
636 if (test_bit(STATE_SENT
, &connection
->flags
) &&
637 !(os
.conn
== C_WF_REPORT_PARAMS
||
638 (ns
.conn
== C_WF_REPORT_PARAMS
&& os
.conn
== C_WF_CONNECTION
)))
639 rv
= SS_IN_TRANSIENT_STATE
;
641 if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) && os
.conn
< C_CONNECTED
)
642 rv
= SS_NEED_CONNECTION
;
644 if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
645 ns
.conn
!= os
.conn
&& os
.conn
> C_CONNECTED
)
646 rv
= SS_RESYNC_RUNNING
;
648 if ((ns
.conn
== C_STARTING_SYNC_S
|| ns
.conn
== C_STARTING_SYNC_T
) &&
649 os
.conn
< C_CONNECTED
)
650 rv
= SS_NEED_CONNECTION
;
652 if ((ns
.conn
== C_SYNC_TARGET
|| ns
.conn
== C_SYNC_SOURCE
)
653 && os
.conn
< C_WF_REPORT_PARAMS
)
654 rv
= SS_NEED_CONNECTION
; /* No NetworkFailure -> SyncTarget etc... */
656 if (ns
.conn
== C_DISCONNECTING
&& ns
.pdsk
== D_OUTDATED
&&
657 os
.conn
< C_CONNECTED
&& os
.pdsk
> D_OUTDATED
)
658 rv
= SS_OUTDATE_WO_CONN
;
663 static enum drbd_state_rv
664 is_valid_conn_transition(enum drbd_conns oc
, enum drbd_conns nc
)
666 /* no change -> nothing to do, at least for the connection part */
668 return SS_NOTHING_TO_DO
;
670 /* disconnect of an unconfigured connection does not make sense */
671 if (oc
== C_STANDALONE
&& nc
== C_DISCONNECTING
)
672 return SS_ALREADY_STANDALONE
;
674 /* from C_STANDALONE, we start with C_UNCONNECTED */
675 if (oc
== C_STANDALONE
&& nc
!= C_UNCONNECTED
)
676 return SS_NEED_CONNECTION
;
678 /* When establishing a connection we need to go through WF_REPORT_PARAMS!
679 Necessary to do the right thing upon invalidate-remote on a disconnected resource */
680 if (oc
< C_WF_REPORT_PARAMS
&& nc
>= C_CONNECTED
)
681 return SS_NEED_CONNECTION
;
683 /* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
684 if (oc
>= C_TIMEOUT
&& oc
<= C_TEAR_DOWN
&& nc
!= C_UNCONNECTED
&& nc
!= C_DISCONNECTING
)
685 return SS_IN_TRANSIENT_STATE
;
687 /* After C_DISCONNECTING only C_STANDALONE may follow */
688 if (oc
== C_DISCONNECTING
&& nc
!= C_STANDALONE
)
689 return SS_IN_TRANSIENT_STATE
;
696 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
697 * This limits hard state transitions. Hard state transitions are facts there are
698 * imposed on DRBD by the environment. E.g. disk broke or network broke down.
699 * But those hard state transitions are still not allowed to do everything.
703 static enum drbd_state_rv
704 is_valid_transition(union drbd_state os
, union drbd_state ns
)
706 enum drbd_state_rv rv
;
708 rv
= is_valid_conn_transition(os
.conn
, ns
.conn
);
710 /* we cannot fail (again) if we already detached */
711 if (ns
.disk
== D_FAILED
&& os
.disk
== D_DISKLESS
)
717 static void print_sanitize_warnings(struct drbd_device
*device
, enum sanitize_state_warnings warn
)
719 static const char *msg_table
[] = {
721 [ABORTED_ONLINE_VERIFY
] = "Online-verify aborted.",
722 [ABORTED_RESYNC
] = "Resync aborted.",
723 [CONNECTION_LOST_NEGOTIATING
] = "Connection lost while negotiating, no data!",
724 [IMPLICITLY_UPGRADED_DISK
] = "Implicitly upgraded disk",
725 [IMPLICITLY_UPGRADED_PDSK
] = "Implicitly upgraded pdsk",
728 if (warn
!= NO_WARNING
)
729 drbd_warn(device
, "%s\n", msg_table
[warn
]);
733 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
734 * @device: DRBD device.
739 * When we loose connection, we have to set the state of the peers disk (pdsk)
740 * to D_UNKNOWN. This rule and many more along those lines are in this function.
742 static union drbd_state
sanitize_state(struct drbd_device
*device
, union drbd_state ns
,
743 enum sanitize_state_warnings
*warn
)
745 enum drbd_fencing_p fp
;
746 enum drbd_disk_state disk_min
, disk_max
, pdsk_min
, pdsk_max
;
752 if (get_ldev(device
)) {
754 fp
= rcu_dereference(device
->ldev
->disk_conf
)->fencing
;
759 /* Implications from connection to peer and peer_isp */
760 if (ns
.conn
< C_CONNECTED
) {
763 if (ns
.pdsk
> D_UNKNOWN
|| ns
.pdsk
< D_INCONSISTENT
)
767 /* Clear the aftr_isp when becoming unconfigured */
768 if (ns
.conn
== C_STANDALONE
&& ns
.disk
== D_DISKLESS
&& ns
.role
== R_SECONDARY
)
771 /* An implication of the disk states onto the connection state */
772 /* Abort resync if a disk fails/detaches */
773 if (ns
.conn
> C_CONNECTED
&& (ns
.disk
<= D_FAILED
|| ns
.pdsk
<= D_FAILED
)) {
775 *warn
= ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
?
776 ABORTED_ONLINE_VERIFY
: ABORTED_RESYNC
;
777 ns
.conn
= C_CONNECTED
;
780 /* Connection breaks down before we finished "Negotiating" */
781 if (ns
.conn
< C_CONNECTED
&& ns
.disk
== D_NEGOTIATING
&&
782 get_ldev_if_state(device
, D_NEGOTIATING
)) {
783 if (device
->ed_uuid
== device
->ldev
->md
.uuid
[UI_CURRENT
]) {
784 ns
.disk
= device
->new_state_tmp
.disk
;
785 ns
.pdsk
= device
->new_state_tmp
.pdsk
;
788 *warn
= CONNECTION_LOST_NEGOTIATING
;
789 ns
.disk
= D_DISKLESS
;
795 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
796 if (ns
.conn
>= C_CONNECTED
&& ns
.conn
< C_AHEAD
) {
797 if (ns
.disk
== D_CONSISTENT
|| ns
.disk
== D_OUTDATED
)
798 ns
.disk
= D_UP_TO_DATE
;
799 if (ns
.pdsk
== D_CONSISTENT
|| ns
.pdsk
== D_OUTDATED
)
800 ns
.pdsk
= D_UP_TO_DATE
;
803 /* Implications of the connection stat on the disk states */
804 disk_min
= D_DISKLESS
;
805 disk_max
= D_UP_TO_DATE
;
806 pdsk_min
= D_INCONSISTENT
;
807 pdsk_max
= D_UNKNOWN
;
808 switch ((enum drbd_conns
)ns
.conn
) {
810 case C_PAUSED_SYNC_T
:
811 case C_STARTING_SYNC_T
:
814 disk_min
= D_INCONSISTENT
;
815 disk_max
= D_OUTDATED
;
816 pdsk_min
= D_UP_TO_DATE
;
817 pdsk_max
= D_UP_TO_DATE
;
821 disk_min
= D_UP_TO_DATE
;
822 disk_max
= D_UP_TO_DATE
;
823 pdsk_min
= D_UP_TO_DATE
;
824 pdsk_max
= D_UP_TO_DATE
;
827 disk_min
= D_DISKLESS
;
828 disk_max
= D_UP_TO_DATE
;
829 pdsk_min
= D_DISKLESS
;
830 pdsk_max
= D_UP_TO_DATE
;
833 case C_PAUSED_SYNC_S
:
834 case C_STARTING_SYNC_S
:
836 disk_min
= D_UP_TO_DATE
;
837 disk_max
= D_UP_TO_DATE
;
838 pdsk_min
= D_INCONSISTENT
;
839 pdsk_max
= D_CONSISTENT
; /* D_OUTDATED would be nice. But explicit outdate necessary*/
842 disk_min
= D_INCONSISTENT
;
843 disk_max
= D_INCONSISTENT
;
844 pdsk_min
= D_UP_TO_DATE
;
845 pdsk_max
= D_UP_TO_DATE
;
848 disk_min
= D_UP_TO_DATE
;
849 disk_max
= D_UP_TO_DATE
;
850 pdsk_min
= D_INCONSISTENT
;
851 pdsk_max
= D_INCONSISTENT
;
854 case C_DISCONNECTING
:
858 case C_NETWORK_FAILURE
:
859 case C_PROTOCOL_ERROR
:
861 case C_WF_CONNECTION
:
862 case C_WF_REPORT_PARAMS
:
866 if (ns
.disk
> disk_max
)
869 if (ns
.disk
< disk_min
) {
871 *warn
= IMPLICITLY_UPGRADED_DISK
;
874 if (ns
.pdsk
> pdsk_max
)
877 if (ns
.pdsk
< pdsk_min
) {
879 *warn
= IMPLICITLY_UPGRADED_PDSK
;
883 if (fp
== FP_STONITH
&&
884 (ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.pdsk
> D_OUTDATED
))
885 ns
.susp_fen
= 1; /* Suspend IO while fence-peer handler runs (peer lost) */
887 if (device
->resource
->res_opts
.on_no_data
== OND_SUSPEND_IO
&&
888 (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
))
889 ns
.susp_nod
= 1; /* Suspend IO while no data available (no accessible data available) */
891 if (ns
.aftr_isp
|| ns
.peer_isp
|| ns
.user_isp
) {
892 if (ns
.conn
== C_SYNC_SOURCE
)
893 ns
.conn
= C_PAUSED_SYNC_S
;
894 if (ns
.conn
== C_SYNC_TARGET
)
895 ns
.conn
= C_PAUSED_SYNC_T
;
897 if (ns
.conn
== C_PAUSED_SYNC_S
)
898 ns
.conn
= C_SYNC_SOURCE
;
899 if (ns
.conn
== C_PAUSED_SYNC_T
)
900 ns
.conn
= C_SYNC_TARGET
;
906 void drbd_resume_al(struct drbd_device
*device
)
908 if (test_and_clear_bit(AL_SUSPENDED
, &device
->flags
))
909 drbd_info(device
, "Resumed AL updates\n");
912 /* helper for __drbd_set_state */
913 static void set_ov_position(struct drbd_device
*device
, enum drbd_conns cs
)
915 if (first_peer_device(device
)->connection
->agreed_pro_version
< 90)
916 device
->ov_start_sector
= 0;
917 device
->rs_total
= drbd_bm_bits(device
);
918 device
->ov_position
= 0;
919 if (cs
== C_VERIFY_T
) {
920 /* starting online verify from an arbitrary position
921 * does not fit well into the existing protocol.
922 * on C_VERIFY_T, we initialize ov_left and friends
923 * implicitly in receive_DataRequest once the
924 * first P_OV_REQUEST is received */
925 device
->ov_start_sector
= ~(sector_t
)0;
927 unsigned long bit
= BM_SECT_TO_BIT(device
->ov_start_sector
);
928 if (bit
>= device
->rs_total
) {
929 device
->ov_start_sector
=
930 BM_BIT_TO_SECT(device
->rs_total
- 1);
931 device
->rs_total
= 1;
933 device
->rs_total
-= bit
;
934 device
->ov_position
= device
->ov_start_sector
;
936 device
->ov_left
= device
->rs_total
;
940 * __drbd_set_state() - Set a new DRBD state
941 * @device: DRBD device.
944 * @done: Optional completion, that will get completed after the after_state_ch() finished
946 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
949 __drbd_set_state(struct drbd_device
*device
, union drbd_state ns
,
950 enum chg_state_flags flags
, struct completion
*done
)
953 enum drbd_state_rv rv
= SS_SUCCESS
;
954 enum sanitize_state_warnings ssw
;
955 struct after_state_chg_work
*ascw
;
956 bool did_remote
, should_do_remote
;
958 os
= drbd_read_state(device
);
960 ns
= sanitize_state(device
, ns
, &ssw
);
962 return SS_NOTHING_TO_DO
;
964 rv
= is_valid_transition(os
, ns
);
968 if (!(flags
& CS_HARD
)) {
969 /* pre-state-change checks ; only look at ns */
970 /* See drbd_state_sw_errors in drbd_strings.c */
972 rv
= is_valid_state(device
, ns
);
973 if (rv
< SS_SUCCESS
) {
974 /* If the old state was illegal as well, then let
977 if (is_valid_state(device
, os
) == rv
)
978 rv
= is_valid_soft_transition(os
, ns
, first_peer_device(device
)->connection
);
980 rv
= is_valid_soft_transition(os
, ns
, first_peer_device(device
)->connection
);
983 if (rv
< SS_SUCCESS
) {
984 if (flags
& CS_VERBOSE
)
985 print_st_err(device
, os
, ns
, rv
);
989 print_sanitize_warnings(device
, ssw
);
991 drbd_pr_state_change(device
, os
, ns
, flags
);
993 /* Display changes to the susp* flags that where caused by the call to
994 sanitize_state(). Only display it here if we where not called from
995 _conn_request_state() */
996 if (!(flags
& CS_DC_SUSP
))
997 conn_pr_state_change(first_peer_device(device
)->connection
, os
, ns
,
998 (flags
& ~CS_DC_MASK
) | CS_DC_SUSP
);
1000 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1001 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1002 * drbd_ldev_destroy() won't happen before our corresponding
1003 * after_state_ch works run, where we put_ldev again. */
1004 if ((os
.disk
!= D_FAILED
&& ns
.disk
== D_FAILED
) ||
1005 (os
.disk
!= D_DISKLESS
&& ns
.disk
== D_DISKLESS
))
1006 atomic_inc(&device
->local_cnt
);
1008 did_remote
= drbd_should_do_remote(device
->state
);
1009 device
->state
.i
= ns
.i
;
1010 should_do_remote
= drbd_should_do_remote(device
->state
);
1011 device
->resource
->susp
= ns
.susp
;
1012 device
->resource
->susp_nod
= ns
.susp_nod
;
1013 device
->resource
->susp_fen
= ns
.susp_fen
;
1015 /* put replicated vs not-replicated requests in seperate epochs */
1016 if (did_remote
!= should_do_remote
)
1017 start_new_tl_epoch(first_peer_device(device
)->connection
);
1019 if (os
.disk
== D_ATTACHING
&& ns
.disk
>= D_NEGOTIATING
)
1020 drbd_print_uuids(device
, "attached to UUIDs");
1022 /* Wake up role changes, that were delayed because of connection establishing */
1023 if (os
.conn
== C_WF_REPORT_PARAMS
&& ns
.conn
!= C_WF_REPORT_PARAMS
&&
1024 no_peer_wf_report_params(first_peer_device(device
)->connection
))
1025 clear_bit(STATE_SENT
, &first_peer_device(device
)->connection
->flags
);
1027 wake_up(&device
->misc_wait
);
1028 wake_up(&device
->state_wait
);
1029 wake_up(&first_peer_device(device
)->connection
->ping_wait
);
1031 /* Aborted verify run, or we reached the stop sector.
1032 * Log the last position, unless end-of-device. */
1033 if ((os
.conn
== C_VERIFY_S
|| os
.conn
== C_VERIFY_T
) &&
1034 ns
.conn
<= C_CONNECTED
) {
1035 device
->ov_start_sector
=
1036 BM_BIT_TO_SECT(drbd_bm_bits(device
) - device
->ov_left
);
1037 if (device
->ov_left
)
1038 drbd_info(device
, "Online Verify reached sector %llu\n",
1039 (unsigned long long)device
->ov_start_sector
);
1042 if ((os
.conn
== C_PAUSED_SYNC_T
|| os
.conn
== C_PAUSED_SYNC_S
) &&
1043 (ns
.conn
== C_SYNC_TARGET
|| ns
.conn
== C_SYNC_SOURCE
)) {
1044 drbd_info(device
, "Syncer continues.\n");
1045 device
->rs_paused
+= (long)jiffies
1046 -(long)device
->rs_mark_time
[device
->rs_last_mark
];
1047 if (ns
.conn
== C_SYNC_TARGET
)
1048 mod_timer(&device
->resync_timer
, jiffies
);
1051 if ((os
.conn
== C_SYNC_TARGET
|| os
.conn
== C_SYNC_SOURCE
) &&
1052 (ns
.conn
== C_PAUSED_SYNC_T
|| ns
.conn
== C_PAUSED_SYNC_S
)) {
1053 drbd_info(device
, "Resync suspended\n");
1054 device
->rs_mark_time
[device
->rs_last_mark
] = jiffies
;
1057 if (os
.conn
== C_CONNECTED
&&
1058 (ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
)) {
1059 unsigned long now
= jiffies
;
1062 set_ov_position(device
, ns
.conn
);
1063 device
->rs_start
= now
;
1064 device
->rs_last_events
= 0;
1065 device
->rs_last_sect_ev
= 0;
1066 device
->ov_last_oos_size
= 0;
1067 device
->ov_last_oos_start
= 0;
1069 for (i
= 0; i
< DRBD_SYNC_MARKS
; i
++) {
1070 device
->rs_mark_left
[i
] = device
->ov_left
;
1071 device
->rs_mark_time
[i
] = now
;
1074 drbd_rs_controller_reset(device
);
1076 if (ns
.conn
== C_VERIFY_S
) {
1077 drbd_info(device
, "Starting Online Verify from sector %llu\n",
1078 (unsigned long long)device
->ov_position
);
1079 mod_timer(&device
->resync_timer
, jiffies
);
1083 if (get_ldev(device
)) {
1084 u32 mdf
= device
->ldev
->md
.flags
& ~(MDF_CONSISTENT
|MDF_PRIMARY_IND
|
1085 MDF_CONNECTED_IND
|MDF_WAS_UP_TO_DATE
|
1086 MDF_PEER_OUT_DATED
|MDF_CRASHED_PRIMARY
);
1088 mdf
&= ~MDF_AL_CLEAN
;
1089 if (test_bit(CRASHED_PRIMARY
, &device
->flags
))
1090 mdf
|= MDF_CRASHED_PRIMARY
;
1091 if (device
->state
.role
== R_PRIMARY
||
1092 (device
->state
.pdsk
< D_INCONSISTENT
&& device
->state
.peer
== R_PRIMARY
))
1093 mdf
|= MDF_PRIMARY_IND
;
1094 if (device
->state
.conn
> C_WF_REPORT_PARAMS
)
1095 mdf
|= MDF_CONNECTED_IND
;
1096 if (device
->state
.disk
> D_INCONSISTENT
)
1097 mdf
|= MDF_CONSISTENT
;
1098 if (device
->state
.disk
> D_OUTDATED
)
1099 mdf
|= MDF_WAS_UP_TO_DATE
;
1100 if (device
->state
.pdsk
<= D_OUTDATED
&& device
->state
.pdsk
>= D_INCONSISTENT
)
1101 mdf
|= MDF_PEER_OUT_DATED
;
1102 if (mdf
!= device
->ldev
->md
.flags
) {
1103 device
->ldev
->md
.flags
= mdf
;
1104 drbd_md_mark_dirty(device
);
1106 if (os
.disk
< D_CONSISTENT
&& ns
.disk
>= D_CONSISTENT
)
1107 drbd_set_ed_uuid(device
, device
->ldev
->md
.uuid
[UI_CURRENT
]);
1111 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1112 if (os
.disk
== D_INCONSISTENT
&& os
.pdsk
== D_INCONSISTENT
&&
1113 os
.peer
== R_SECONDARY
&& ns
.peer
== R_PRIMARY
)
1114 set_bit(CONSIDER_RESYNC
, &device
->flags
);
1116 /* Receiver should clean up itself */
1117 if (os
.conn
!= C_DISCONNECTING
&& ns
.conn
== C_DISCONNECTING
)
1118 drbd_thread_stop_nowait(&first_peer_device(device
)->connection
->receiver
);
1120 /* Now the receiver finished cleaning up itself, it should die */
1121 if (os
.conn
!= C_STANDALONE
&& ns
.conn
== C_STANDALONE
)
1122 drbd_thread_stop_nowait(&first_peer_device(device
)->connection
->receiver
);
1124 /* Upon network failure, we need to restart the receiver. */
1125 if (os
.conn
> C_WF_CONNECTION
&&
1126 ns
.conn
<= C_TEAR_DOWN
&& ns
.conn
>= C_TIMEOUT
)
1127 drbd_thread_restart_nowait(&first_peer_device(device
)->connection
->receiver
);
1129 /* Resume AL writing if we get a connection */
1130 if (os
.conn
< C_CONNECTED
&& ns
.conn
>= C_CONNECTED
) {
1131 drbd_resume_al(device
);
1132 first_peer_device(device
)->connection
->connect_cnt
++;
1135 /* remember last attach time so request_timer_fn() won't
1136 * kill newly established sessions while we are still trying to thaw
1137 * previously frozen IO */
1138 if ((os
.disk
== D_ATTACHING
|| os
.disk
== D_NEGOTIATING
) &&
1139 ns
.disk
> D_NEGOTIATING
)
1140 device
->last_reattach_jif
= jiffies
;
1142 ascw
= kmalloc(sizeof(*ascw
), GFP_ATOMIC
);
1146 ascw
->flags
= flags
;
1147 ascw
->w
.cb
= w_after_state_ch
;
1148 ascw
->w
.device
= device
;
1150 drbd_queue_work(&first_peer_device(device
)->connection
->sender_work
, &ascw
->w
);
1152 drbd_err(device
, "Could not kmalloc an ascw\n");
1158 static int w_after_state_ch(struct drbd_work
*w
, int unused
)
1160 struct after_state_chg_work
*ascw
=
1161 container_of(w
, struct after_state_chg_work
, w
);
1162 struct drbd_device
*device
= w
->device
;
1164 after_state_ch(device
, ascw
->os
, ascw
->ns
, ascw
->flags
);
1165 if (ascw
->flags
& CS_WAIT_COMPLETE
) {
1166 D_ASSERT(device
, ascw
->done
!= NULL
);
1167 complete(ascw
->done
);
1174 static void abw_start_sync(struct drbd_device
*device
, int rv
)
1177 drbd_err(device
, "Writing the bitmap failed not starting resync.\n");
1178 _drbd_request_state(device
, NS(conn
, C_CONNECTED
), CS_VERBOSE
);
1182 switch (device
->state
.conn
) {
1183 case C_STARTING_SYNC_T
:
1184 _drbd_request_state(device
, NS(conn
, C_WF_SYNC_UUID
), CS_VERBOSE
);
1186 case C_STARTING_SYNC_S
:
1187 drbd_start_resync(device
, C_SYNC_SOURCE
);
1192 int drbd_bitmap_io_from_worker(struct drbd_device
*device
,
1193 int (*io_fn
)(struct drbd_device
*),
1194 char *why
, enum bm_flag flags
)
1198 D_ASSERT(device
, current
== first_peer_device(device
)->connection
->worker
.task
);
1200 /* open coded non-blocking drbd_suspend_io(device); */
1201 set_bit(SUSPEND_IO
, &device
->flags
);
1203 drbd_bm_lock(device
, why
, flags
);
1205 drbd_bm_unlock(device
);
1207 drbd_resume_io(device
);
1213 * after_state_ch() - Perform after state change actions that may sleep
1214 * @device: DRBD device.
1219 static void after_state_ch(struct drbd_device
*device
, union drbd_state os
,
1220 union drbd_state ns
, enum chg_state_flags flags
)
1222 struct drbd_resource
*resource
= device
->resource
;
1223 struct sib_info sib
;
1225 sib
.sib_reason
= SIB_STATE_CHANGE
;
1229 if (os
.conn
!= C_CONNECTED
&& ns
.conn
== C_CONNECTED
) {
1230 clear_bit(CRASHED_PRIMARY
, &device
->flags
);
1232 device
->p_uuid
[UI_FLAGS
] &= ~((u64
)2);
1235 /* Inform userspace about the change... */
1236 drbd_bcast_event(device
, &sib
);
1238 if (!(os
.role
== R_PRIMARY
&& os
.disk
< D_UP_TO_DATE
&& os
.pdsk
< D_UP_TO_DATE
) &&
1239 (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
))
1240 drbd_khelper(device
, "pri-on-incon-degr");
1242 /* Here we have the actions that are performed after a
1243 state change. This function might sleep */
1246 struct drbd_connection
*connection
= first_peer_device(device
)->connection
;
1247 enum drbd_req_event what
= NOTHING
;
1249 spin_lock_irq(&device
->resource
->req_lock
);
1250 if (os
.conn
< C_CONNECTED
&& conn_lowest_conn(connection
) >= C_CONNECTED
)
1253 if ((os
.disk
== D_ATTACHING
|| os
.disk
== D_NEGOTIATING
) &&
1254 conn_lowest_disk(connection
) > D_NEGOTIATING
)
1255 what
= RESTART_FROZEN_DISK_IO
;
1257 if (resource
->susp_nod
&& what
!= NOTHING
) {
1258 _tl_restart(connection
, what
);
1259 _conn_request_state(connection
,
1260 (union drbd_state
) { { .susp_nod
= 1 } },
1261 (union drbd_state
) { { .susp_nod
= 0 } },
1264 spin_unlock_irq(&device
->resource
->req_lock
);
1268 struct drbd_connection
*connection
= first_peer_device(device
)->connection
;
1270 spin_lock_irq(&device
->resource
->req_lock
);
1271 if (resource
->susp_fen
&& conn_lowest_conn(connection
) >= C_CONNECTED
) {
1272 /* case2: The connection was established again: */
1273 struct drbd_peer_device
*peer_device
;
1277 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
1278 clear_bit(NEW_CUR_UUID
, &peer_device
->device
->flags
);
1280 _tl_restart(connection
, RESEND
);
1281 _conn_request_state(connection
,
1282 (union drbd_state
) { { .susp_fen
= 1 } },
1283 (union drbd_state
) { { .susp_fen
= 0 } },
1286 spin_unlock_irq(&device
->resource
->req_lock
);
1289 /* Became sync source. With protocol >= 96, we still need to send out
1290 * the sync uuid now. Need to do that before any drbd_send_state, or
1291 * the other side may go "paused sync" before receiving the sync uuids,
1292 * which is unexpected. */
1293 if ((os
.conn
!= C_SYNC_SOURCE
&& os
.conn
!= C_PAUSED_SYNC_S
) &&
1294 (ns
.conn
== C_SYNC_SOURCE
|| ns
.conn
== C_PAUSED_SYNC_S
) &&
1295 first_peer_device(device
)->connection
->agreed_pro_version
>= 96 && get_ldev(device
)) {
1296 drbd_gen_and_send_sync_uuid(device
);
1300 /* Do not change the order of the if above and the two below... */
1301 if (os
.pdsk
== D_DISKLESS
&&
1302 ns
.pdsk
> D_DISKLESS
&& ns
.pdsk
!= D_UNKNOWN
) { /* attach on the peer */
1303 /* we probably will start a resync soon.
1304 * make sure those things are properly reset. */
1305 device
->rs_total
= 0;
1306 device
->rs_failed
= 0;
1307 atomic_set(&device
->rs_pending_cnt
, 0);
1308 drbd_rs_cancel_all(device
);
1310 drbd_send_uuids(device
);
1311 drbd_send_state(device
, ns
);
1313 /* No point in queuing send_bitmap if we don't have a connection
1314 * anymore, so check also the _current_ state, not only the new state
1315 * at the time this work was queued. */
1316 if (os
.conn
!= C_WF_BITMAP_S
&& ns
.conn
== C_WF_BITMAP_S
&&
1317 device
->state
.conn
== C_WF_BITMAP_S
)
1318 drbd_queue_bitmap_io(device
, &drbd_send_bitmap
, NULL
,
1319 "send_bitmap (WFBitMapS)",
1320 BM_LOCKED_TEST_ALLOWED
);
1322 /* Lost contact to peer's copy of the data */
1323 if ((os
.pdsk
>= D_INCONSISTENT
&&
1324 os
.pdsk
!= D_UNKNOWN
&&
1325 os
.pdsk
!= D_OUTDATED
)
1326 && (ns
.pdsk
< D_INCONSISTENT
||
1327 ns
.pdsk
== D_UNKNOWN
||
1328 ns
.pdsk
== D_OUTDATED
)) {
1329 if (get_ldev(device
)) {
1330 if ((ns
.role
== R_PRIMARY
|| ns
.peer
== R_PRIMARY
) &&
1331 device
->ldev
->md
.uuid
[UI_BITMAP
] == 0 && ns
.disk
>= D_UP_TO_DATE
) {
1332 if (drbd_suspended(device
)) {
1333 set_bit(NEW_CUR_UUID
, &device
->flags
);
1335 drbd_uuid_new_current(device
);
1336 drbd_send_uuids(device
);
1343 if (ns
.pdsk
< D_INCONSISTENT
&& get_ldev(device
)) {
1344 if (os
.peer
== R_SECONDARY
&& ns
.peer
== R_PRIMARY
&&
1345 device
->ldev
->md
.uuid
[UI_BITMAP
] == 0 && ns
.disk
>= D_UP_TO_DATE
) {
1346 drbd_uuid_new_current(device
);
1347 drbd_send_uuids(device
);
1349 /* D_DISKLESS Peer becomes secondary */
1350 if (os
.peer
== R_PRIMARY
&& ns
.peer
== R_SECONDARY
)
1351 /* We may still be Primary ourselves.
1352 * No harm done if the bitmap still changes,
1353 * redirtied pages will follow later. */
1354 drbd_bitmap_io_from_worker(device
, &drbd_bm_write
,
1355 "demote diskless peer", BM_LOCKED_SET_ALLOWED
);
1359 /* Write out all changed bits on demote.
1360 * Though, no need to da that just yet
1361 * if there is a resync going on still */
1362 if (os
.role
== R_PRIMARY
&& ns
.role
== R_SECONDARY
&&
1363 device
->state
.conn
<= C_CONNECTED
&& get_ldev(device
)) {
1364 /* No changes to the bitmap expected this time, so assert that,
1365 * even though no harm was done if it did change. */
1366 drbd_bitmap_io_from_worker(device
, &drbd_bm_write
,
1367 "demote", BM_LOCKED_TEST_ALLOWED
);
1371 /* Last part of the attaching process ... */
1372 if (ns
.conn
>= C_CONNECTED
&&
1373 os
.disk
== D_ATTACHING
&& ns
.disk
== D_NEGOTIATING
) {
1374 drbd_send_sizes(device
, 0, 0); /* to start sync... */
1375 drbd_send_uuids(device
);
1376 drbd_send_state(device
, ns
);
1379 /* We want to pause/continue resync, tell peer. */
1380 if (ns
.conn
>= C_CONNECTED
&&
1381 ((os
.aftr_isp
!= ns
.aftr_isp
) ||
1382 (os
.user_isp
!= ns
.user_isp
)))
1383 drbd_send_state(device
, ns
);
1385 /* In case one of the isp bits got set, suspend other devices. */
1386 if ((!os
.aftr_isp
&& !os
.peer_isp
&& !os
.user_isp
) &&
1387 (ns
.aftr_isp
|| ns
.peer_isp
|| ns
.user_isp
))
1388 suspend_other_sg(device
);
1390 /* Make sure the peer gets informed about eventual state
1391 changes (ISP bits) while we were in WFReportParams. */
1392 if (os
.conn
== C_WF_REPORT_PARAMS
&& ns
.conn
>= C_CONNECTED
)
1393 drbd_send_state(device
, ns
);
1395 if (os
.conn
!= C_AHEAD
&& ns
.conn
== C_AHEAD
)
1396 drbd_send_state(device
, ns
);
1398 /* We are in the progress to start a full sync... */
1399 if ((os
.conn
!= C_STARTING_SYNC_T
&& ns
.conn
== C_STARTING_SYNC_T
) ||
1400 (os
.conn
!= C_STARTING_SYNC_S
&& ns
.conn
== C_STARTING_SYNC_S
))
1401 /* no other bitmap changes expected during this phase */
1402 drbd_queue_bitmap_io(device
,
1403 &drbd_bmio_set_n_write
, &abw_start_sync
,
1404 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED
);
1406 /* first half of local IO error, failure to attach,
1407 * or administrative detach */
1408 if (os
.disk
!= D_FAILED
&& ns
.disk
== D_FAILED
) {
1409 enum drbd_io_error_p eh
= EP_PASS_ON
;
1410 int was_io_error
= 0;
1411 /* corresponding get_ldev was in __drbd_set_state, to serialize
1412 * our cleanup here with the transition to D_DISKLESS.
1413 * But is is still not save to dreference ldev here, since
1414 * we might come from an failed Attach before ldev was set. */
1417 eh
= rcu_dereference(device
->ldev
->disk_conf
)->on_io_error
;
1420 was_io_error
= test_and_clear_bit(WAS_IO_ERROR
, &device
->flags
);
1422 if (was_io_error
&& eh
== EP_CALL_HELPER
)
1423 drbd_khelper(device
, "local-io-error");
1425 /* Immediately allow completion of all application IO,
1426 * that waits for completion from the local disk,
1427 * if this was a force-detach due to disk_timeout
1428 * or administrator request (drbdsetup detach --force).
1429 * Do NOT abort otherwise.
1430 * Aborting local requests may cause serious problems,
1431 * if requests are completed to upper layers already,
1432 * and then later the already submitted local bio completes.
1433 * This can cause DMA into former bio pages that meanwhile
1434 * have been re-used for other things.
1435 * So aborting local requests may cause crashes,
1436 * or even worse, silent data corruption.
1438 if (test_and_clear_bit(FORCE_DETACH
, &device
->flags
))
1439 tl_abort_disk_io(device
);
1441 /* current state still has to be D_FAILED,
1442 * there is only one way out: to D_DISKLESS,
1443 * and that may only happen after our put_ldev below. */
1444 if (device
->state
.disk
!= D_FAILED
)
1446 "ASSERT FAILED: disk is %s during detach\n",
1447 drbd_disk_str(device
->state
.disk
));
1449 if (ns
.conn
>= C_CONNECTED
)
1450 drbd_send_state(device
, ns
);
1452 drbd_rs_cancel_all(device
);
1454 /* In case we want to get something to stable storage still,
1455 * this may be the last chance.
1456 * Following put_ldev may transition to D_DISKLESS. */
1457 drbd_md_sync(device
);
1462 /* second half of local IO error, failure to attach,
1463 * or administrative detach,
1464 * after local_cnt references have reached zero again */
1465 if (os
.disk
!= D_DISKLESS
&& ns
.disk
== D_DISKLESS
) {
1466 /* We must still be diskless,
1467 * re-attach has to be serialized with this! */
1468 if (device
->state
.disk
!= D_DISKLESS
)
1470 "ASSERT FAILED: disk is %s while going diskless\n",
1471 drbd_disk_str(device
->state
.disk
));
1473 if (ns
.conn
>= C_CONNECTED
)
1474 drbd_send_state(device
, ns
);
1475 /* corresponding get_ldev in __drbd_set_state
1476 * this may finally trigger drbd_ldev_destroy. */
1480 /* Notify peer that I had a local IO error, and did not detached.. */
1481 if (os
.disk
== D_UP_TO_DATE
&& ns
.disk
== D_INCONSISTENT
&& ns
.conn
>= C_CONNECTED
)
1482 drbd_send_state(device
, ns
);
1484 /* Disks got bigger while they were detached */
1485 if (ns
.disk
> D_NEGOTIATING
&& ns
.pdsk
> D_NEGOTIATING
&&
1486 test_and_clear_bit(RESYNC_AFTER_NEG
, &device
->flags
)) {
1487 if (ns
.conn
== C_CONNECTED
)
1488 resync_after_online_grow(device
);
1491 /* A resync finished or aborted, wake paused devices... */
1492 if ((os
.conn
> C_CONNECTED
&& ns
.conn
<= C_CONNECTED
) ||
1493 (os
.peer_isp
&& !ns
.peer_isp
) ||
1494 (os
.user_isp
&& !ns
.user_isp
))
1495 resume_next_sg(device
);
1497 /* sync target done with resync. Explicitly notify peer, even though
1498 * it should (at least for non-empty resyncs) already know itself. */
1499 if (os
.disk
< D_UP_TO_DATE
&& os
.conn
>= C_SYNC_SOURCE
&& ns
.conn
== C_CONNECTED
)
1500 drbd_send_state(device
, ns
);
1502 /* Verify finished, or reached stop sector. Peer did not know about
1503 * the stop sector, and we may even have changed the stop sector during
1504 * verify to interrupt/stop early. Send the new state. */
1505 if (os
.conn
== C_VERIFY_S
&& ns
.conn
== C_CONNECTED
1506 && verify_can_do_stop_sector(device
))
1507 drbd_send_state(device
, ns
);
1509 /* This triggers bitmap writeout of potentially still unwritten pages
1510 * if the resync finished cleanly, or aborted because of peer disk
1511 * failure, or because of connection loss.
1512 * For resync aborted because of local disk failure, we cannot do
1513 * any bitmap writeout anymore.
1514 * No harm done if some bits change during this phase.
1516 if (os
.conn
> C_CONNECTED
&& ns
.conn
<= C_CONNECTED
&& get_ldev(device
)) {
1517 drbd_queue_bitmap_io(device
, &drbd_bm_write_copy_pages
, NULL
,
1518 "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED
);
1522 if (ns
.disk
== D_DISKLESS
&&
1523 ns
.conn
== C_STANDALONE
&&
1524 ns
.role
== R_SECONDARY
) {
1525 if (os
.aftr_isp
!= ns
.aftr_isp
)
1526 resume_next_sg(device
);
1529 drbd_md_sync(device
);
1532 struct after_conn_state_chg_work
{
1535 union drbd_state ns_min
;
1536 union drbd_state ns_max
; /* new, max state, over all devices */
1537 enum chg_state_flags flags
;
1540 static int w_after_conn_state_ch(struct drbd_work
*w
, int unused
)
1542 struct after_conn_state_chg_work
*acscw
=
1543 container_of(w
, struct after_conn_state_chg_work
, w
);
1544 struct drbd_connection
*connection
= w
->connection
;
1545 enum drbd_conns oc
= acscw
->oc
;
1546 union drbd_state ns_max
= acscw
->ns_max
;
1547 struct drbd_peer_device
*peer_device
;
1552 /* Upon network configuration, we need to start the receiver */
1553 if (oc
== C_STANDALONE
&& ns_max
.conn
== C_UNCONNECTED
)
1554 drbd_thread_start(&connection
->receiver
);
1556 if (oc
== C_DISCONNECTING
&& ns_max
.conn
== C_STANDALONE
) {
1557 struct net_conf
*old_conf
;
1559 mutex_lock(&connection
->resource
->conf_update
);
1560 old_conf
= connection
->net_conf
;
1561 connection
->my_addr_len
= 0;
1562 connection
->peer_addr_len
= 0;
1563 rcu_assign_pointer(connection
->net_conf
, NULL
);
1564 conn_free_crypto(connection
);
1565 mutex_unlock(&connection
->resource
->conf_update
);
1571 if (ns_max
.susp_fen
) {
1572 /* case1: The outdate peer handler is successful: */
1573 if (ns_max
.pdsk
<= D_OUTDATED
) {
1575 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
1576 struct drbd_device
*device
= peer_device
->device
;
1577 if (test_bit(NEW_CUR_UUID
, &device
->flags
)) {
1578 drbd_uuid_new_current(device
);
1579 clear_bit(NEW_CUR_UUID
, &device
->flags
);
1583 spin_lock_irq(&connection
->resource
->req_lock
);
1584 _tl_restart(connection
, CONNECTION_LOST_WHILE_PENDING
);
1585 _conn_request_state(connection
,
1586 (union drbd_state
) { { .susp_fen
= 1 } },
1587 (union drbd_state
) { { .susp_fen
= 0 } },
1589 spin_unlock_irq(&connection
->resource
->req_lock
);
1592 kref_put(&connection
->kref
, drbd_destroy_connection
);
1594 conn_md_sync(connection
);
1599 void conn_old_common_state(struct drbd_connection
*connection
, union drbd_state
*pcs
, enum chg_state_flags
*pf
)
1601 enum chg_state_flags flags
= ~0;
1602 struct drbd_peer_device
*peer_device
;
1603 int vnr
, first_vol
= 1;
1604 union drbd_dev_state os
, cs
= {
1605 { .role
= R_SECONDARY
,
1607 .conn
= connection
->cstate
,
1613 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
1614 struct drbd_device
*device
= peer_device
->device
;
1623 if (cs
.role
!= os
.role
)
1624 flags
&= ~CS_DC_ROLE
;
1626 if (cs
.peer
!= os
.peer
)
1627 flags
&= ~CS_DC_PEER
;
1629 if (cs
.conn
!= os
.conn
)
1630 flags
&= ~CS_DC_CONN
;
1632 if (cs
.disk
!= os
.disk
)
1633 flags
&= ~CS_DC_DISK
;
1635 if (cs
.pdsk
!= os
.pdsk
)
1636 flags
&= ~CS_DC_PDSK
;
1645 static enum drbd_state_rv
1646 conn_is_valid_transition(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
,
1647 enum chg_state_flags flags
)
1649 enum drbd_state_rv rv
= SS_SUCCESS
;
1650 union drbd_state ns
, os
;
1651 struct drbd_peer_device
*peer_device
;
1655 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
1656 struct drbd_device
*device
= peer_device
->device
;
1657 os
= drbd_read_state(device
);
1658 ns
= sanitize_state(device
, apply_mask_val(os
, mask
, val
), NULL
);
1660 if (flags
& CS_IGN_OUTD_FAIL
&& ns
.disk
== D_OUTDATED
&& os
.disk
< D_OUTDATED
)
1666 rv
= is_valid_transition(os
, ns
);
1668 if (rv
>= SS_SUCCESS
&& !(flags
& CS_HARD
)) {
1669 rv
= is_valid_state(device
, ns
);
1670 if (rv
< SS_SUCCESS
) {
1671 if (is_valid_state(device
, os
) == rv
)
1672 rv
= is_valid_soft_transition(os
, ns
, connection
);
1674 rv
= is_valid_soft_transition(os
, ns
, connection
);
1677 if (rv
< SS_SUCCESS
) {
1678 if (flags
& CS_VERBOSE
)
1679 print_st_err(device
, os
, ns
, rv
);
1689 conn_set_state(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
,
1690 union drbd_state
*pns_min
, union drbd_state
*pns_max
, enum chg_state_flags flags
)
1692 union drbd_state ns
, os
, ns_max
= { };
1693 union drbd_state ns_min
= {
1700 struct drbd_peer_device
*peer_device
;
1701 enum drbd_state_rv rv
;
1702 int vnr
, number_of_volumes
= 0;
1704 if (mask
.conn
== C_MASK
) {
1705 /* remember last connect time so request_timer_fn() won't
1706 * kill newly established sessions while we are still trying to thaw
1707 * previously frozen IO */
1708 if (connection
->cstate
!= C_WF_REPORT_PARAMS
&& val
.conn
== C_WF_REPORT_PARAMS
)
1709 connection
->last_reconnect_jif
= jiffies
;
1711 connection
->cstate
= val
.conn
;
1715 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
1716 struct drbd_device
*device
= peer_device
->device
;
1717 number_of_volumes
++;
1718 os
= drbd_read_state(device
);
1719 ns
= apply_mask_val(os
, mask
, val
);
1720 ns
= sanitize_state(device
, ns
, NULL
);
1722 if (flags
& CS_IGN_OUTD_FAIL
&& ns
.disk
== D_OUTDATED
&& os
.disk
< D_OUTDATED
)
1725 rv
= __drbd_set_state(device
, ns
, flags
, NULL
);
1726 if (rv
< SS_SUCCESS
)
1729 ns
.i
= device
->state
.i
;
1730 ns_max
.role
= max_role(ns
.role
, ns_max
.role
);
1731 ns_max
.peer
= max_role(ns
.peer
, ns_max
.peer
);
1732 ns_max
.conn
= max_t(enum drbd_conns
, ns
.conn
, ns_max
.conn
);
1733 ns_max
.disk
= max_t(enum drbd_disk_state
, ns
.disk
, ns_max
.disk
);
1734 ns_max
.pdsk
= max_t(enum drbd_disk_state
, ns
.pdsk
, ns_max
.pdsk
);
1736 ns_min
.role
= min_role(ns
.role
, ns_min
.role
);
1737 ns_min
.peer
= min_role(ns
.peer
, ns_min
.peer
);
1738 ns_min
.conn
= min_t(enum drbd_conns
, ns
.conn
, ns_min
.conn
);
1739 ns_min
.disk
= min_t(enum drbd_disk_state
, ns
.disk
, ns_min
.disk
);
1740 ns_min
.pdsk
= min_t(enum drbd_disk_state
, ns
.pdsk
, ns_min
.pdsk
);
1744 if (number_of_volumes
== 0) {
1745 ns_min
= ns_max
= (union drbd_state
) { {
1746 .role
= R_SECONDARY
,
1754 ns_min
.susp
= ns_max
.susp
= connection
->resource
->susp
;
1755 ns_min
.susp_nod
= ns_max
.susp_nod
= connection
->resource
->susp_nod
;
1756 ns_min
.susp_fen
= ns_max
.susp_fen
= connection
->resource
->susp_fen
;
1762 static enum drbd_state_rv
1763 _conn_rq_cond(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
)
1765 enum drbd_state_rv rv
;
1767 if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY
, &connection
->flags
))
1768 return SS_CW_SUCCESS
;
1770 if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL
, &connection
->flags
))
1771 return SS_CW_FAILED_BY_PEER
;
1773 rv
= conn_is_valid_transition(connection
, mask
, val
, 0);
1774 if (rv
== SS_SUCCESS
&& connection
->cstate
== C_WF_REPORT_PARAMS
)
1775 rv
= SS_UNKNOWN_ERROR
; /* continue waiting */
1781 _conn_request_state(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
,
1782 enum chg_state_flags flags
)
1784 enum drbd_state_rv rv
= SS_SUCCESS
;
1785 struct after_conn_state_chg_work
*acscw
;
1786 enum drbd_conns oc
= connection
->cstate
;
1787 union drbd_state ns_max
, ns_min
, os
;
1788 bool have_mutex
= false;
1791 rv
= is_valid_conn_transition(oc
, val
.conn
);
1792 if (rv
< SS_SUCCESS
)
1796 rv
= conn_is_valid_transition(connection
, mask
, val
, flags
);
1797 if (rv
< SS_SUCCESS
)
1800 if (oc
== C_WF_REPORT_PARAMS
&& val
.conn
== C_DISCONNECTING
&&
1801 !(flags
& (CS_LOCAL_ONLY
| CS_HARD
))) {
1803 /* This will be a cluster-wide state change.
1804 * Need to give up the spinlock, grab the mutex,
1805 * then send the state change request, ... */
1806 spin_unlock_irq(&connection
->resource
->req_lock
);
1807 mutex_lock(&connection
->cstate_mutex
);
1810 set_bit(CONN_WD_ST_CHG_REQ
, &connection
->flags
);
1811 if (conn_send_state_req(connection
, mask
, val
)) {
1812 /* sending failed. */
1813 clear_bit(CONN_WD_ST_CHG_REQ
, &connection
->flags
);
1814 rv
= SS_CW_FAILED_BY_PEER
;
1815 /* need to re-aquire the spin lock, though */
1816 goto abort_unlocked
;
1819 if (val
.conn
== C_DISCONNECTING
)
1820 set_bit(DISCONNECT_SENT
, &connection
->flags
);
1822 /* ... and re-aquire the spinlock.
1823 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
1824 * conn_set_state() within the same spinlock. */
1825 spin_lock_irq(&connection
->resource
->req_lock
);
1826 wait_event_lock_irq(connection
->ping_wait
,
1827 (rv
= _conn_rq_cond(connection
, mask
, val
)),
1828 connection
->resource
->req_lock
);
1829 clear_bit(CONN_WD_ST_CHG_REQ
, &connection
->flags
);
1830 if (rv
< SS_SUCCESS
)
1834 conn_old_common_state(connection
, &os
, &flags
);
1835 flags
|= CS_DC_SUSP
;
1836 conn_set_state(connection
, mask
, val
, &ns_min
, &ns_max
, flags
);
1837 conn_pr_state_change(connection
, os
, ns_max
, flags
);
1839 acscw
= kmalloc(sizeof(*acscw
), GFP_ATOMIC
);
1841 acscw
->oc
= os
.conn
;
1842 acscw
->ns_min
= ns_min
;
1843 acscw
->ns_max
= ns_max
;
1844 acscw
->flags
= flags
;
1845 acscw
->w
.cb
= w_after_conn_state_ch
;
1846 kref_get(&connection
->kref
);
1847 acscw
->w
.connection
= connection
;
1848 drbd_queue_work(&connection
->sender_work
, &acscw
->w
);
1850 drbd_err(connection
, "Could not kmalloc an acscw\n");
1855 /* mutex_unlock() "... must not be used in interrupt context.",
1856 * so give up the spinlock, then re-aquire it */
1857 spin_unlock_irq(&connection
->resource
->req_lock
);
1859 mutex_unlock(&connection
->cstate_mutex
);
1860 spin_lock_irq(&connection
->resource
->req_lock
);
1862 if (rv
< SS_SUCCESS
&& flags
& CS_VERBOSE
) {
1863 drbd_err(connection
, "State change failed: %s\n", drbd_set_st_err_str(rv
));
1864 drbd_err(connection
, " mask = 0x%x val = 0x%x\n", mask
.i
, val
.i
);
1865 drbd_err(connection
, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc
), drbd_conn_str(val
.conn
));
1871 conn_request_state(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
,
1872 enum chg_state_flags flags
)
1874 enum drbd_state_rv rv
;
1876 spin_lock_irq(&connection
->resource
->req_lock
);
1877 rv
= _conn_request_state(connection
, mask
, val
, flags
);
1878 spin_unlock_irq(&connection
->resource
->req_lock
);