drbd: Fixed logging of old connection state
drivers/block/drbd/drbd_state.c
/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
27
28#include <linux/drbd_limits.h>
29#include "drbd_int.h"
30#include "drbd_req.h"
31
32struct after_state_chg_work {
33 struct drbd_work w;
34 union drbd_state os;
35 union drbd_state ns;
36 enum chg_state_flags flags;
37 struct completion *done;
38};
39
static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static void after_all_state_ch(struct drbd_tconn *tconn);
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       const char **warn_sync_abort);

bool conn_all_vols_unconf(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.disk != D_DISKLESS ||
		    mdev->state.conn != C_STANDALONE ||
		    mdev->state.role != R_SECONDARY)
			return false;
	}
	return true;
}

/* Unfortunately the states were not correctly ordered when they
   were defined; therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_PRIMARY || role2 == R_PRIMARY)
		return R_PRIMARY;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_UNKNOWN;
}
static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
		return R_UNKNOWN;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_PRIMARY;
}

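/*
 * Illustration (added, not in the original file): in drbd.h the role
 * values are laid out roughly as R_UNKNOWN = 0, R_PRIMARY = 1,
 * R_SECONDARY = 2, so a plain numeric max_t() of R_PRIMARY and
 * R_SECONDARY would wrongly pick R_SECONDARY.  The helpers above encode
 * the intended severity order Primary > Secondary > Unknown explicitly:
 *
 *	max_role(R_PRIMARY, R_SECONDARY) == R_PRIMARY
 *	min_role(R_PRIMARY, R_SECONDARY) == R_SECONDARY
 */
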
enum drbd_role conn_highest_role(struct drbd_tconn *tconn)
{
	enum drbd_role role = R_UNKNOWN;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		role = max_role(role, mdev->state.role);

	return role;
}

enum drbd_role conn_highest_peer(struct drbd_tconn *tconn)
{
	enum drbd_role peer = R_UNKNOWN;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		peer = max_role(peer, mdev->state.peer);

	return peer;
}

enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_DISKLESS;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = max_t(enum drbd_disk_state, ds, mdev->state.disk);

	return ds;
}

enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn)
{
	enum drbd_disk_state ds = D_DISKLESS;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		ds = max_t(enum drbd_disk_state, ds, mdev->state.pdsk);

	return ds;
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}

static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}

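/*
 * Usage sketch (added for illustration): mask selects which bit-fields
 * of the state word change, val supplies their new values; everything
 * outside mask is taken unchanged from os.  To change only the
 * connection field a caller could build the pair by hand:
 *
 *	union drbd_state mask = { }, val = { };
 *	mask.conn = C_MASK;
 *	val.conn = C_DISCONNECTING;
 *	ns = apply_mask_val(os, mask, val);
 *
 * The NS()/_NS() macros used below construct such mask/val pairs.
 */
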
enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	ns = apply_mask_val(mdev->state, mask, val);
	rv = _drbd_set_state(mdev, ns, f, NULL);
	ns = mdev->state;
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}

static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = mdev->state;
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* continue waiting, otherwise fail. */

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns);
			if (rv == SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR;  /* continue waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}

/**
 * drbd_req_state() - Perform a state change that may be cluster wide
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(mdev->state_mutex);

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = mdev->state;
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns);
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		if (drbd_send_state_req(mdev, mask, val)) {
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
		ns = apply_mask_val(mdev->state, mask, val);
		rv = _drbd_set_state(mdev, ns, f, &done);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->tconn->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(mdev->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}

static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}

void print_st_err(struct drbd_conf *mdev, union drbd_state os,
	          union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}

static void print_state_change(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns,
			       enum chg_state_flags flags)
{
	char *pbp, pb[300];
	pbp = pb;
	*pbp = 0;
	if (ns.role != os.role)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn && !(flags & CS_NO_CSTATE_CHG))
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));
	if (is_susp(ns) != is_susp(os))
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));
	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);
	if (pbp != pb)
		dev_info(DEV, "%s\n", pb);
}

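/*
 * Example output (illustrative; state names come from drbd_strings.c):
 *
 *	block drbd0: role( Secondary -> Primary ) disk( Inconsistent -> UpToDate )
 *
 * Only fields that actually changed are printed.  With CS_NO_CSTATE_CHG
 * the conn part is suppressed here, because _conn_request_state() then
 * logs the connection state change once per connection instead of once
 * per volume, which is what the "Fixed logging of old connection state"
 * change in this file is about.
 */
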
/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev->tconn)) {
		if (!mdev->tconn->net_conf->two_primaries && ns.role == R_PRIMARY) {
			if (ns.peer == R_PRIMARY)
				rv = SS_TWO_PRIMARIES;
			else if (conn_highest_peer(mdev->tconn) == R_PRIMARY)
				rv = SS_O_VOL_PEER_PRI;
		}
		put_net_conf(mdev->tconn);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->tconn->net_conf->verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->tconn->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

	return rv;
}

/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * This function limits state transitions that may be declined by DRBD,
 * i.e. user requests (aka soft transitions).
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	/* if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
	   rv = SS_IN_TRANSIENT_STATE; */

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}

static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	/* Disallow network errors from configuring a device's network part */
	if ((nc >= C_TIMEOUT && nc <= C_TEAR_DOWN) && oc <= C_DISCONNECTING)
		rv = SS_NEED_CONNECTION;

	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
		rv = SS_IN_TRANSIENT_STATE;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
		rv = SS_IN_TRANSIENT_STATE;

	return rv;
}

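/*
 * Note (assumption about the enum layout in drbd.h): the range checks
 * above rely on the network error states C_TIMEOUT, C_BROKEN_PIPE,
 * C_NETWORK_FAILURE, C_PROTOCOL_ERROR and C_TEAR_DOWN being declared
 * consecutively.  E.g. C_NETWORK_FAILURE -> C_SYNC_TARGET is rejected
 * with SS_IN_TRANSIENT_STATE, while C_NETWORK_FAILURE -> C_UNCONNECTED
 * is allowed.
 */
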
/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * This limits hard state transitions. Hard state transitions are facts
 * that are imposed on DRBD by the environment, e.g. the disk broke or the
 * network broke down. But even those hard state transitions are still
 * not allowed to do everything.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv;

	rv = is_valid_conn_transition(os.conn, ns.conn);

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	/* if we are only D_ATTACHING yet,
	 * we can (and should) go directly to D_DISKLESS. */
	if (ns.disk == D_FAILED && os.disk == D_ATTACHING) {
		printk("TODO: FIX ME\n");
		rv = SS_IS_DISKLESS;
	}

	return rv;
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn_sync_abort:
 *
 * When we lose the connection, we have to set the state of the peer's
 * disk (pdsk) to D_UNKNOWN. This rule and many more along those lines
 * are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       const char **warn_sync_abort)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Implications from connection to peer and peer_isp */
	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* An implication of the disk states onto the connection state */
	/* Abort resync if a disk fails/detaches */
	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort =
				ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
				"Online-verify" : "Resync";
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		dev_warn(DEV, "Implicitly set disk from %s to %s\n",
			 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
			 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->tconn->res_opts.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no accessible data is available */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}

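/*
 * Worked example (added for illustration): a C_SYNC_TARGET node must
 * have an inconsistent local disk and an up-to-date peer disk.  If a
 * caller asks for { conn = C_SYNC_TARGET, disk = D_UP_TO_DATE,
 * pdsk = D_UNKNOWN }, the table above silently clamps disk down to
 * D_INCONSISTENT (disk_max) and raises pdsk to D_UP_TO_DATE (pdsk_min)
 * with an "Implicitly set pdsk" warning, so impossible combinations
 * never reach mdev->state.
 */
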
void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->tconn->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}

/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
	         enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	const char *warn_sync_abort = NULL;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, ns, &warn_sync_abort);
	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS)
		return rv;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen... */

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_soft_transition(os, ns);
		} else
			rv = is_valid_soft_transition(os, ns);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

	print_state_change(mdev, os, ns, flags);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state = ns;

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (conn_all_vols_unconf(mdev->tconn) &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->tconn->flags))
		set_bit(OBJECT_DYING, &mdev->tconn->flags);

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(mdev, "attached to UUIDs");

	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);
	wake_up(&mdev->tconn->ping_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			 (unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
				 (unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->tconn->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->w.mdev = mdev;
		ascw->done = done;
		drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
	} else {
		dev_err(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}

static int w_after_state_ch(struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_conf *mdev = w->mdev;

	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 0;
}

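/*
 * Note on the pattern above: __drbd_set_state() runs under req_lock and
 * therefore must not sleep.  Everything that may sleep is packaged into
 * an after_state_chg_work and queued to the worker thread; the
 * w_after_state_ch() callback unpacks it, runs after_state_ch() in
 * process context and finally signals the optional completion that
 * CS_WAIT_COMPLETE callers block on in drbd_req_state().
 */
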
static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
			       int (*io_fn)(struct drbd_conf *),
			       char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current == mdev->tconn->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}

/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	enum drbd_fencing_p fp;
	enum drbd_req_event what = NOTHING;
	union drbd_state nsm;
	struct sib_info sib;

	sib.sib_reason = SIB_STATE_CHANGE;
	sib.os = os;
	sib.ns = ns;

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Inform userspace about the change... */
	drbd_bcast_event(mdev, &sib);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	nsm.i = -1;
	if (ns.susp_nod) {
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
			what = RESEND;

		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
			what = RESTART_FROZEN_DISK_IO;

		if (what != NOTHING)
			nsm.susp_nod = 0;
	}

	if (ns.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
			tl_clear(mdev->tconn);
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
			}
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);
			what = RESEND;
			nsm.susp_fen = 0;
		}
	}

	if (what != NOTHING) {
		spin_lock_irq(&mdev->tconn->req_lock);
		_tl_restart(mdev->tconn, what);
		nsm.i &= mdev->state.i;
		_drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);
	}

	/* Became sync source.  With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
		drbd_gen_and_send_sync_uuid(mdev);
		put_ldev(mdev);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}
	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    mdev->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (is_susp(mdev->state)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

	/* Write out all changed bits on demote.
	 * Though, no need to do that just yet
	 * if there is a resync going on still */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
		mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
		/* No changes to the bitmap expected this time, so assert that,
		 * even though no harm was done if it did change. */
		drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote", BM_LOCKED_TEST_ALLOWED);
		put_ldev(mdev);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev);

	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(mdev);

	/* We are about to start a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(mdev,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

	/* We are invalidating ourselves... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		/* other bitmap operation expected during this phase */
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
			"set_n_write from invalidate", BM_LOCKED_MASK);

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh;
		int was_io_error;
		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS,
		 * so it is safe to dereference ldev here. */
		eh = mdev->ldev->dc.on_io_error;
		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

		/* current state still has to be D_FAILED,
		 * there is only one way out: to D_DISKLESS,
		 * and that may only happen after our put_ldev below. */
		if (mdev->state.disk != D_FAILED)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s during detach\n",
				drbd_disk_str(mdev->state.disk));

		if (!drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
		else
			dev_err(DEV, "Sending state for detaching disk failed\n");

		drbd_rs_cancel_all(mdev);

		/* In case we want to get something to stable storage still,
		 * this may be the last chance.
		 * Following put_ldev may transition to D_DISKLESS. */
		drbd_md_sync(mdev);
		put_ldev(mdev);

		if (was_io_error && eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");
	}

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (mdev->state.disk != D_DISKLESS)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s while going diskless\n",
				drbd_disk_str(mdev->state.disk));

		mdev->rs_total = 0;
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);

		if (!drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(mdev);
	}

	/* Notify peer that I had a local IO error and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
		drbd_send_state(mdev);

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* sync target done with resync.  Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(mdev);

	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or because of connection loss.
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 * No harm done if some bits change during this phase.
	 */
	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
			"write from resync_finished", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
	}

	after_all_state_ch(mdev->tconn);

	drbd_md_sync(mdev);
}

struct after_conn_state_chg_work {
	struct drbd_work w;
	enum drbd_conns oc;
	union drbd_state nms; /* new, max state, over all mdevs */
	enum chg_state_flags flags;
};

static void after_all_state_ch(struct drbd_tconn *tconn)
{
	if (conn_all_vols_unconf(tconn) &&
	    test_bit(OBJECT_DYING, &tconn->flags)) {
		drbd_thread_stop_nowait(&tconn->worker);
	}
}

static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
	struct after_conn_state_chg_work *acscw =
		container_of(w, struct after_conn_state_chg_work, w);
	struct drbd_tconn *tconn = w->tconn;
	enum drbd_conns oc = acscw->oc;
	union drbd_state nms = acscw->nms;

	kfree(acscw);

	/* Upon network configuration, we need to start the receiver */
	if (oc == C_STANDALONE && nms.conn == C_UNCONNECTED)
		drbd_thread_start(&tconn->receiver);

	//conn_err(tconn, STATE_FMT, STATE_ARGS("nms", nms));
	after_all_state_ch(tconn);

	return 0;
}

static void print_conn_state_change(struct drbd_tconn *tconn, enum drbd_conns oc, enum drbd_conns nc)
{
	char *pbp, pb[300];
	pbp = pb;
	*pbp = 0;
	if (nc != oc)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(oc),
			       drbd_conn_str(nc));

	conn_info(tconn, "%s\n", pb);
}

enum sp_state {
	OC_UNINITIALIZED,
	OC_CONSISTENT,
	OC_INCONSISTENT,
} oc_state;

static void common_state_part(enum sp_state *sps, int *sp, int nsp)
{
	switch (*sps) {
	case OC_UNINITIALIZED:
		*sp = nsp;
		*sps = OC_CONSISTENT;
		break;
	case OC_CONSISTENT:
		if (*sp != nsp)
			*sps = OC_INCONSISTENT;
		break;
	case OC_INCONSISTENT:
		break;
	}
}

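/*
 * Example (added for illustration): feeding the role field of three
 * volumes into common_state_part(), starting from OC_UNINITIALIZED:
 *
 *	volume 0: R_PRIMARY	-> sp = R_PRIMARY, sps = OC_CONSISTENT
 *	volume 1: R_PRIMARY	-> unchanged, still OC_CONSISTENT
 *	volume 2: R_SECONDARY	-> sps = OC_INCONSISTENT
 *
 * conn_old_common_state() below includes a state part in the returned
 * mask only if its part-state stayed OC_CONSISTENT across all volumes.
 */
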
void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, union drbd_state *pmask)
{
	union drbd_state css = {}; /* common state state */
	union drbd_state os, cs = {}; /* old_state, common_state */
	union drbd_state mask = {};
	enum sp_state sps; /* state part state */
	int sp; /* state part */
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;

		sps = css.role;
		sp = cs.role;
		common_state_part(&sps, &sp, os.role);
		css.role = sps;
		cs.role = sp;

		sps = css.peer;
		sp = cs.peer;
		common_state_part(&sps, &sp, os.peer);
		css.peer = sps;
		cs.peer = sp;

		sps = css.conn;
		sp = cs.conn;
		common_state_part(&sps, &sp, os.conn);
		css.conn = sps;
		cs.conn = sp;

		sps = css.disk;
		sp = cs.disk;
		common_state_part(&sps, &sp, os.disk);
		css.disk = sps;
		cs.disk = sp;

		sps = css.pdsk;
		sp = cs.pdsk;
		common_state_part(&sps, &sp, os.pdsk);
		css.pdsk = sps;
		cs.pdsk = sp;
	}

	if (css.role == OC_CONSISTENT)
		mask.role = R_MASK;
	if (css.peer == OC_CONSISTENT)
		mask.peer = R_MASK;
	if (css.conn == OC_CONSISTENT)
		mask.conn = C_MASK;
	if (css.disk == OC_CONSISTENT)
		mask.disk = D_MASK;
	if (css.pdsk == OC_CONSISTENT)
		mask.pdsk = D_MASK;

	*pcs = cs;
	*pmask = mask;
}

static enum drbd_state_rv
conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;
		ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);

		if (ns.i == os.i)
			continue;

		rv = is_valid_transition(os, ns);
		if (rv < SS_SUCCESS)
			break;

		if (!(flags & CS_HARD)) {
			rv = is_valid_state(mdev, ns);
			if (rv < SS_SUCCESS) {
				if (is_valid_state(mdev, os) == rv)
					rv = is_valid_soft_transition(os, ns);
			} else
				rv = is_valid_soft_transition(os, ns);
		}
		if (rv < SS_SUCCESS)
			break;
	}

	if (rv < SS_SUCCESS && flags & CS_VERBOSE)
		print_st_err(mdev, os, ns, rv);

	return rv;
}

static union drbd_state
conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	       enum chg_state_flags flags)
{
	union drbd_state ns, os, ms = { };
	struct drbd_conf *mdev;
	enum drbd_state_rv rv;
	int vnr;

	if (mask.conn == C_MASK)
		tconn->cstate = val.conn;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		os = mdev->state;
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(mdev, ns, NULL);

		rv = __drbd_set_state(mdev, ns, flags, NULL);
		if (rv < SS_SUCCESS)
			BUG();

		ms.role = max_role(mdev->state.role, ms.role);
		ms.peer = max_role(mdev->state.peer, ms.peer);
		ms.disk = max_t(enum drbd_disk_state, mdev->state.disk, ms.disk);
		ms.pdsk = max_t(enum drbd_disk_state, mdev->state.pdsk, ms.pdsk);
	}

	return ms;
}

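/*
 * Design note: conn_set_state() may BUG() on a failing __drbd_set_state()
 * because every volume was already vetted by conn_is_valid_transition()
 * in _conn_request_state() below; a failure at this point would mean the
 * checks and the commit disagree.  The returned ms collects, per field,
 * the "highest" state over all volumes and becomes the nms reported to
 * w_after_conn_state_ch().
 */
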
static enum drbd_state_rv
_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irq(&tconn->req_lock);
	rv = tconn->cstate != C_WF_REPORT_PARAMS ? SS_CW_NO_NEED : SS_UNKNOWN_ERROR;

	if (rv == SS_UNKNOWN_ERROR)
		rv = conn_is_valid_transition(tconn, mask, val, CS_NO_CSTATE_CHG);

	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR; /* continue waiting, otherwise fail. */

	spin_unlock_irq(&tconn->req_lock);

	return rv;
}

static enum drbd_state_rv
conn_cl_wide(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
	     enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	spin_unlock_irq(&tconn->req_lock);
	mutex_lock(&tconn->cstate_mutex);

	if (conn_send_state_req(tconn, mask, val)) {
		rv = SS_CW_FAILED_BY_PEER;
		/* if (f & CS_VERBOSE)
		   print_st_err(mdev, os, ns, rv); */
		goto abort;
	}

	wait_event(tconn->ping_wait, (rv = _conn_rq_cond(tconn, mask, val)));

abort:
	mutex_unlock(&tconn->cstate_mutex);
	spin_lock_irq(&tconn->req_lock);

	return rv;
}

enum drbd_state_rv
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = tconn->cstate;
	union drbd_state ms, os_val, os_mask;

	rv = is_valid_conn_transition(oc, val.conn);
	if (rv < SS_SUCCESS)
		goto abort;

	rv = conn_is_valid_transition(tconn, mask, val, flags);
	if (rv < SS_SUCCESS)
		goto abort;

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {
		rv = conn_cl_wide(tconn, mask, val, flags);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	conn_old_common_state(tconn, &os_val, &os_mask);
	if (os_mask.conn == C_MASK) {
		oc = os_val.conn;
		print_conn_state_change(tconn, oc, val.conn);
		flags |= CS_NO_CSTATE_CHG;
	}

	ms = conn_set_state(tconn, mask, val, flags);
	ms.conn = val.conn;

	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
	if (acscw) {
		acscw->oc = oc;
		acscw->nms = ms;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		acscw->w.tconn = tconn;
		drbd_queue_work(&tconn->data.work, &acscw->w);
	} else {
		conn_err(tconn, "Could not kmalloc an acscw\n");
	}

abort:
	return rv;
}

enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags)
{
	enum drbd_state_rv rv;

	spin_lock_irq(&tconn->req_lock);
	rv = _conn_request_state(tconn, mask, val, flags);
	spin_unlock_irq(&tconn->req_lock);

	return rv;
}