/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/ctype.h>
#include <linux/smp_lock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif
/* module parameters, defined */
unsigned int minor_count = 32;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct drbd_conf **minor_table;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* epoch entries */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
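/*
 * Illustrative sketch only (not driver code): how a page chain threaded
 * through page->private, as described above, behaves.  The helper names
 * are made up for the example.
 *
 *	static void example_push(struct page **pool, struct page *page)
 *	{
 *		set_page_private(page, (unsigned long)*pool);
 *		*pool = page;
 *	}
 *
 *	static struct page *example_pop(struct page **pool)
 *	{
 *		struct page *page = *pool;
 *		if (page)
 *			*pool = (struct page *)page_private(page);
 *		return page;
 *	}
 */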
static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}
/**
 * DOC: The transfer log
 *
 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests
 * attached to it.
 */
static int tl_init(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	mdev->oldest_tle = b;
	mdev->newest_tle = b;
	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);

	mdev->tl_hash = NULL;

	return 1;
}
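/*
 * Illustrative sketch only (not driver code): walking the transfer log
 * described above, from oldest to newest epoch, via the singly linked
 * "next" pointer.
 *
 *	struct drbd_tl_epoch *b;
 *	for (b = mdev->oldest_tle; b != NULL; b = b->next) {
 *		// b->requests is the circular double linked list of
 *		// requests belonging to this epoch.
 *	}
 */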
static void tl_cleanup(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
	kfree(mdev->oldest_tle);
	mdev->oldest_tle = NULL;
	kfree(mdev->unused_spare_tle);
	mdev->unused_spare_tle = NULL;
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
}
/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:	DRBD device.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = mdev->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (mdev->newest_tle != new) {
		mdev->newest_tle->next = new;
		mdev->newest_tle = new;
	}
}
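/*
 * Note on the GNU "?:" shorthand used above: "x ?: y" evaluates to x if
 * x is non-zero, else y.  So when br_number wraps, (0xffffffff + 1)
 * evaluates to 0 and the expression yields 1, skipping the reserved
 * barrier number 0.
 */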
/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:	DRBD device.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch object, this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	b = mdev->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
			barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, barrier_acked);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the lists head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(connection_lost_while_pending).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, barrier_acked) above.
	   */
	list_del_init(&b->requests);

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, b);
		if (nob)
			mdev->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore mdev->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		mdev->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&mdev->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&mdev->req_lock);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
 * restart_frozen_disk_io.
 */
static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = mdev->oldest_tle;
	pn = &mdev->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ)  >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == resend) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(mdev);
					set_bit(CREATE_BARRIER, &mdev->flags);
				}

				drbd_queue_work(&mdev->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(mdev);

			if (b == mdev->newest_tle) {
				/* recycle, but reinit! */
				D_ASSERT(tmp == NULL);
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}
/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_conf *mdev)
{
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	_tl_restart(mdev, connection_lost_while_pending);

	/* we expect this list to be empty. */
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, connection_lost_while_pending);
	}

	/* ensure bit indicating barrier is required is clear */
	clear_bit(CREATE_BARRIER, &mdev->flags);

	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));

	spin_unlock_irq(&mdev->req_lock);
}
void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	spin_lock_irq(&mdev->req_lock);
	_tl_restart(mdev, what);
	spin_unlock_irq(&mdev->req_lock);
}
/**
 * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		      union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state os, ns;
	int rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	rv = _drbd_set_state(mdev, ns, f, NULL);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}
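/*
 * Illustrative note: state changes are expressed as a (mask, val) pair
 * over the packed union drbd_state; the NS(T, S) macro used throughout
 * this file builds such a pair touching only field T.  E.g. the call in
 * tl_release() above,
 *
 *	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 *
 * changes only the connection field and leaves all other state fields
 * intact.
 */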
static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
static int is_valid_state_transition(struct drbd_conf *,
				     union drbd_state, union drbd_state);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, int *warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
			union drbd_state, union drbd_state);
static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
					      union drbd_state mask, union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	int rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	rv = 0;
	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (!rv) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_state_transition(mdev, ns, os);
			if (rv == SS_SUCCESS)
				rv = 0; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}
/**
 * drbd_req_state() - Perform an eventually cluster wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static int drbd_req_state(struct drbd_conf *mdev,
			  union drbd_state mask, union drbd_state val,
			  enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	int rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(&mdev->state_mutex);

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_state_transition(mdev, ns, os);
		spin_unlock_irqrestore(&mdev->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		drbd_state_lock(mdev);
		if (!drbd_send_state_req(mdev, mask, val)) {
			drbd_state_unlock(mdev);
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			drbd_state_unlock(mdev);
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->req_lock, flags);
		os = mdev->state;
		ns.i = (os.i & ~mask.i) | val.i;
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(&mdev->state_mutex);

	return rv;
}
/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
			union drbd_state val, enum chg_state_flags f)
{
	int rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}
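/*
 * Illustrative usage (hypothetical caller): ask for the primary role via
 * the logging wrapper drbd_request_state() and wait until the possibly
 * cluster wide change has been carried out:
 *
 *	int rv = drbd_request_state(mdev, NS(role, R_PRIMARY));
 *	if (rv < SS_SUCCESS)
 *		;  // the state change was refused, rv names the reason
 */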
static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    ns.susp ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-'
	    );
}

void print_st_err(struct drbd_conf *mdev,
	union drbd_state os, union drbd_state ns, int err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}
#define drbd_peer_str drbd_role_str
#define drbd_pdsk_str drbd_disk_str

#define drbd_susp_str(A)     ((A) ? "1" : "0")
#define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
#define drbd_peer_isp_str(A) ((A) ? "1" : "0")
#define drbd_user_isp_str(A) ((A) ? "1" : "0")

#define PSC(A) \
	({ if (ns.A != os.A) { \
		pbp += sprintf(pbp, #A "( %s -> %s ) ", \
			      drbd_##A##_str(os.A), \
			      drbd_##A##_str(ns.A)); \
	} })
/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	int rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		if (!mdev->net_conf->two_primaries &&
		    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
			rv = SS_TWO_PRIMARIES;
		put_net_conf(mdev);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->sync_conf.verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	return rv;
}
/**
 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @os:		old state.
 */
static int is_valid_state_transition(struct drbd_conf *mdev,
				     union drbd_state ns, union drbd_state os)
{
	int rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	return rv;
}
/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn_sync_abort:	set if an aborted resync should be warned about.
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, int *warn_sync_abort)
{
	enum drbd_fencing_p fp;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Disallow Network errors to configure a device's network part */
	if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
	    os.conn <= C_DISCONNECTING)
		ns.conn = os.conn;

	/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */
	if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING)
		ns.conn = os.conn;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
		ns.conn = os.conn;

	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* Abort resync if a disk fails/detaches */
	if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
	    (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort = 1;
		ns.conn = C_CONNECTED;
	}

	if (ns.conn >= C_CONNECTED &&
	    ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
	     (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
		switch (ns.conn) {
		case C_WF_BITMAP_T:
		case C_PAUSED_SYNC_T:
			ns.disk = D_OUTDATED;
			break;
		case C_CONNECTED:
		case C_WF_BITMAP_S:
		case C_SYNC_SOURCE:
		case C_PAUSED_SYNC_S:
			ns.disk = D_UP_TO_DATE;
			break;
		case C_SYNC_TARGET:
			ns.disk = D_INCONSISTENT;
			dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
			break;
		}
		if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
			dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
	}

	if (ns.conn >= C_CONNECTED &&
	    (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
		switch (ns.conn) {
		case C_CONNECTED:
		case C_WF_BITMAP_T:
		case C_PAUSED_SYNC_T:
		case C_SYNC_TARGET:
			ns.pdsk = D_UP_TO_DATE;
			break;
		case C_WF_BITMAP_S:
		case C_PAUSED_SYNC_S:
			/* remap any consistent state to D_OUTDATED,
			 * but disallow "upgrade" of not even consistent states.
			 */
			ns.pdsk =
				(D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
				? os.pdsk : D_OUTDATED;
			break;
		case C_SYNC_SOURCE:
			ns.pdsk = D_INCONSISTENT;
			dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
			break;
		}
		if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
			dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp = 1; /* Suspend IO while no accessible data is available */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}
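/*
 * Worked example (illustrative): if the connection drops while we are
 * SyncSource, a caller may only request ns.conn = C_TIMEOUT; since
 * ns.conn < C_CONNECTED, sanitize_state() then implicitly sets
 * ns.peer = R_UNKNOWN and ns.pdsk = D_UNKNOWN, because nothing can be
 * known about the peer without a connection.
 */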
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total)
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
		mdev->ov_position = mdev->ov_start_sector;
	}
}
static void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}
/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
int __drbd_set_state(struct drbd_conf *mdev,
		     union drbd_state ns, enum chg_state_flags flags,
		     struct completion *done)
{
	union drbd_state os;
	int rv = SS_SUCCESS;
	int warn_sync_abort = 0;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, os, ns, &warn_sync_abort);

	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks ; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen... */

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_state_transition(mdev, ns, os);
		} else
			rv = is_valid_state_transition(mdev, ns, os);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "Resync aborted.\n");

	{
		char *pbp, pb[300];
		pbp = pb;
		*pbp = 0;
		PSC(role);
		PSC(peer);
		PSC(conn);
		PSC(disk);
		PSC(pdsk);
		PSC(susp);
		PSC(aftr_isp);
		PSC(peer_isp);
		PSC(user_isp);
		dev_info(DEV, "%s\n", pb);
	}

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);

	mdev->state.i = ns.i;
	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);

	/* post-state-change actions */
	if (os.conn >= C_SYNC_SOURCE && ns.conn <= C_CONNECTED) {
		set_bit(STOP_SYNC_TIMER, &mdev->flags);
		mod_timer(&mdev->resync_timer, jiffies);
	}

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET) {
			if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))
				mod_timer(&mdev->resync_timer, jiffies);
			/* This if (!test_bit) is only needed for the case
			   that a device that has ceased to used its timer,
			   i.e. it is already in drbd_resync_finished() gets
			   paused and resumed. */
		}
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
		if (ns.conn == C_PAUSED_SYNC_T)
			set_bit(STOP_SYNC_TIMER, &mdev->flags);
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		mdev->ov_position = 0;
		mdev->rs_total = drbd_bm_bits(mdev);
		if (mdev->agreed_pro_version >= 90)
			set_ov_position(mdev, ns.conn);
		else
			mdev->ov_start_sector = 0;
		mdev->ov_left = mdev->rs_total
			      - BM_SECT_TO_BIT(mdev->ov_position);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->rs_total;
			mdev->rs_mark_time[i] = now;
		}

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->done = done;
		drbd_queue_work(&mdev->data.work, &ascw->w);
	} else {
		dev_warn(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 1;
}
static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}
/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	enum drbd_fencing_p fp;
	enum drbd_req_event what = nothing;

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Inform userspace about the change... */
	drbd_bcast_state(mdev, ns);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	if (os.susp && ns.susp && mdev->sync_conf.on_no_data == OND_SUSPEND_IO) {
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			if (ns.conn == C_CONNECTED)
				what = resend;
			else /* ns.conn > C_CONNECTED */
				dev_err(DEV, "Unexpected Resync going on!\n");
		}

		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
			what = restart_frozen_disk_io;
	}

	if (fp == FP_STONITH && ns.susp) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
			tl_clear(mdev);
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
				drbd_md_sync(mdev);
			}
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);
			what = resend;
		}
	}

	if (what != nothing) {
		spin_lock_irq(&mdev->req_lock);
		_tl_restart(mdev, what);
		_drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (mdev->state.susp) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev);

	/* We are in the progress to start a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");

	/* We are invalidating our self... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");

	if (os.disk > D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh;

		eh = EP_PASS_ON;
		if (get_ldev_if_state(mdev, D_FAILED)) {
			eh = mdev->ldev->dc.on_io_error;
			put_ldev(mdev);
		}

		drbd_rs_cancel_all(mdev);
		/* since get_ldev() only works as long as disk>=D_INCONSISTENT,
		   and it is D_DISKLESS here, local_cnt can only go down, it can
		   not increase... It will reach zero */
		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);

		spin_lock_irq(&mdev->req_lock);
		_drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");
	}

	if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {

		if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS*/ {
			if (drbd_send_state(mdev))
				dev_warn(DEV, "Notified peer that my disk is broken.\n");
			else
				dev_err(DEV, "Sending state in drbd_io_error() failed\n");
		}

		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
		lc_destroy(mdev->resync);
		mdev->resync = NULL;
		lc_destroy(mdev->act_log);
		mdev->act_log = NULL;
		__no_warn(local,
			drbd_free_bc(mdev->ldev);
			mdev->ldev = NULL;);

		if (mdev->md_io_tmpp)
			__free_page(mdev->md_io_tmpp);
	}

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* free tl_hash if we Got thawed and are C_STANDALONE */
	if (ns.conn == C_STANDALONE && ns.susp == 0 && mdev->tl_hash)
		drbd_free_tl_hash(mdev);

	/* Upon network connection, we need to start the receiver */
	if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
		drbd_thread_start(&mdev->receiver);

	/* Terminate worker thread if we are unconfigured - it will be
	   restarted as needed... */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
		/* set in __drbd_set_state, unless CONFIG_PENDING was set */
		if (test_bit(DEVICE_DYING, &mdev->flags))
			drbd_thread_stop_nowait(&mdev->worker);
	}

	drbd_md_sync(mdev);
}
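/*
 * Summary sketch (illustrative, tying the state machine pieces above
 * together):
 *
 *	drbd_request_state(mdev, mask, val)
 *	  -> drbd_req_state()         // validate, maybe negotiate with peer
 *	     -> __drbd_set_state()    // commit under req_lock, queue ascw
 *	        -> w_after_state_ch()      // runs in worker context
 *	           -> after_state_ch()     // actions that may sleep
 */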
static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_conf *mdev = thi->mdev;
	unsigned long flags;
	int retval;

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "Exiting", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "Restarting" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees Exiting, and can remap to Restarting,
	 * or thread_start sees None, and can proceed as normal.
	 */

	if (thi->t_state == Restarting) {
		dev_info(DEV, "Restarting %s\n", current->comm);
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = None;
	smp_mb();
	complete(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	dev_info(DEV, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	module_put(THIS_MODULE);
	return retval;
}
static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *))
{
	spin_lock_init(&thi->t_lock);
	thi->task = NULL;
	thi->t_state = None;
	thi->function = func;
	thi->mdev = mdev;
}
int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct task_struct *nt;
	unsigned long flags;

	const char *me =
		thi == &mdev->receiver ? "receiver" :
		thi == &mdev->asender  ? "asender"  :
		thi == &mdev->worker   ? "worker"   : "NONSENSE";

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case None:
		dev_info(DEV, "Starting %s thread (from %s [%d])\n",
				me, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return FALSE;
		}

		init_completion(&thi->stop);
		D_ASSERT(thi->task == NULL);
		thi->reset_cpu_mask = 1;
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd%d_%s", mdev_to_minor(mdev), me);

		if (IS_ERR(nt)) {
			dev_err(DEV, "Couldn't start thread\n");

			module_put(THIS_MODULE);
			return FALSE;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case Exiting:
		thi->t_state = Restarting;
		dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
				me, current->comm, current->pid);
		/* fall through */
	case Running:
	case Restarting:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return TRUE;
}
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? Restarting : Exiting;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == None) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @mdev:	DRBD device.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_conf *mdev)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(mdev->cpu_mask))
		return;

	ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, mdev->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(mdev->cpu_mask);
}
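/*
 * Worked example (illustrative): with 4 online CPUs, minor 5 yields
 * ord = 5 % 4 = 1, so all threads of that device get pinned to the
 * second online CPU; device minors thus spread round-robin over CPUs.
 */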
/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @mdev:	DRBD device.
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
{
	struct task_struct *p = current;
	struct drbd_thread *thi =
		p == mdev->asender.task  ? &mdev->asender  :
		p == mdev->receiver.task ? &mdev->receiver :
		p == mdev->worker.task   ? &mdev->worker   :
		NULL;
	ERR_IF(thi == NULL)
		return;
	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, mdev->cpu_mask);
}
/* the appropriate socket mutex must be held already */
int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
		   enum drbd_packets cmd, struct p_header80 *h,
		   size_t size, unsigned msg_flags)
{
	int sent, ok;

	ERR_IF(!h) return FALSE;
	ERR_IF(!size) return FALSE;

	h->magic   = BE_DRBD_MAGIC;
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size-sizeof(struct p_header80));

	sent = drbd_send(mdev, sock, h, size, msg_flags);

	ok = (sent == size);
	if (!ok)
		dev_err(DEV, "short sent %s size=%d sent=%d\n",
		    cmdname(cmd), (int)size, sent);
	return ok;
}
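/*
 * Illustrative sketch (not driver code): how a peer would decode such a
 * header.  All header fields travel in big endian byte order, and
 * "length" excludes the header itself.
 *
 *	u16 cmd  = be16_to_cpu(h->command);
 *	u16 plen = be16_to_cpu(h->length);  // payload bytes following h
 *	if (h->magic != BE_DRBD_MAGIC)
 *		;  // protocol error, tear down the connection
 */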
/* don't pass the socket. we may only look at it
 * when we hold the appropriate socket mutex.
 */
int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
		  enum drbd_packets cmd, struct p_header80 *h, size_t size)
{
	int ok = 0;
	struct socket *sock;

	if (use_data_socket) {
		mutex_lock(&mdev->data.mutex);
		sock = mdev->data.socket;
	} else {
		mutex_lock(&mdev->meta.mutex);
		sock = mdev->meta.socket;
	}

	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (likely(sock != NULL))
		ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);

	if (use_data_socket)
		mutex_unlock(&mdev->data.mutex);
	else
		mutex_unlock(&mdev->meta.mutex);
	return ok;
}
int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
		   size_t size)
{
	struct p_header80 h;
	int ok;

	h.magic   = BE_DRBD_MAGIC;
	h.command = cpu_to_be16(cmd);
	h.length  = cpu_to_be16(size);

	if (!drbd_get_data_sock(mdev))
		return 0;

	ok = (sizeof(h) ==
		drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
	ok = ok && (size ==
		drbd_send(mdev, mdev->data.socket, data, size, 0));

	drbd_put_data_sock(mdev);

	return ok;
}
int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
{
	struct p_rs_param_95 *p;
	struct socket *sock;
	int size, rv;
	const int apv = mdev->agreed_pro_version;

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(mdev->sync_conf.verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	/* used from admin command context and receiver/worker context.
	 * to avoid kmalloc, grab the socket right here,
	 * then use the pre-allocated sbuf there */
	mutex_lock(&mdev->data.mutex);
	sock = mdev->data.socket;

	if (likely(sock != NULL)) {
		enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

		p = &mdev->data.sbuf.rs_param_95;

		/* initialize verify_alg and csums_alg */
		memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

		p->rate = cpu_to_be32(sc->rate);
		p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(sc->c_delay_target);
		p->c_fill_target = cpu_to_be32(sc->c_fill_target);
		p->c_max_rate = cpu_to_be32(sc->c_max_rate);

		if (apv >= 88)
			strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
		if (apv >= 89)
			strcpy(p->csums_alg, mdev->sync_conf.csums_alg);

		rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
	} else
		rv = 0; /* not ok */

	mutex_unlock(&mdev->data.mutex);

	return rv;
}
int drbd_send_protocol(struct drbd_conf *mdev)
{
	struct p_protocol *p;
	int size, cf, rv;

	size = sizeof(struct p_protocol);

	if (mdev->agreed_pro_version >= 87)
		size += strlen(mdev->net_conf->integrity_alg) + 1;

	/* we must not recurse into our own queue,
	 * as that is blocked during handshake */
	p = kmalloc(size, GFP_NOIO);
	if (p == NULL)
		return 0;

	p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
	p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);

	cf = 0;
	if (mdev->net_conf->want_lose)
		cf |= CF_WANT_LOSE;
	if (mdev->net_conf->dry_run) {
		if (mdev->agreed_pro_version >= 92)
			cf |= CF_DRY_RUN;
		else {
			dev_err(DEV, "--dry-run is not supported by peer");
			kfree(p);
			return 0;
		}
	}
	p->conn_flags    = cpu_to_be32(cf);

	if (mdev->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);

	rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
			   (struct p_header80 *)p, size);
	kfree(p);
	return rv;
}
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct p_uuids p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 1;

	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
			     (struct p_header80 *)&p, sizeof(p));
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}
int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
{
	struct p_rs_uuid p;

	p.uuid = cpu_to_be64(val);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
			     (struct p_header80 *)&p, sizeof(p));
}
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct p_sizes p;
	sector_t d_size, u_size;
	int q_order_type;
	int ok;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		u_size = mdev->ldev->dc.disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
	}

	p.d_size = cpu_to_be64(d_size);
	p.u_size = cpu_to_be64(u_size);
	p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
	p.queue_order_type = cpu_to_be16(q_order_type);
	p.dds_flags = cpu_to_be16(flags);

	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
			   (struct p_header80 *)&p, sizeof(p));
	return ok;
}
/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
	struct socket *sock;
	struct p_state p;
	int ok = 0;

	/* Grab state lock so we won't send state if we're in the middle
	 * of a cluster wide state change on another thread */
	drbd_state_lock(mdev);

	mutex_lock(&mdev->data.mutex);

	p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	sock = mdev->data.socket;

	if (likely(sock != NULL)) {
		ok = _drbd_send_cmd(mdev, sock, P_STATE,
				    (struct p_header80 *)&p, sizeof(p), 0);
	}

	mutex_unlock(&mdev->data.mutex);

	drbd_state_unlock(mdev);
	return ok;
}
int drbd_send_state_req(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	struct p_req_state p;

	p.mask = cpu_to_be32(mask.i);
	p.val  = cpu_to_be32(val.i);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
			     (struct p_header80 *)&p, sizeof(p));
}

int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
{
	struct p_req_state_reply p;

	p.retcode = cpu_to_be32(retcode);

	return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
			     (struct p_header80 *)&p, sizeof(p));
}
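/*
 * Cross-reference (illustrative): these two packets implement the
 * cluster wide state change negotiated in drbd_req_state() above.  The
 * requesting side sends P_STATE_CHG_REQ carrying the (mask, val) pair;
 * the peer answers with P_STATE_CHG_REPLY, whose retcode ends up
 * setting CL_ST_CHG_SUCCESS or CL_ST_CHG_FAIL, which _req_st_cond()
 * polls while waiting on state_wait.
 */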
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
	struct p_compressed_bm *p,
	struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits;

	/* may we use this feature? */
	if ((mdev->sync_conf.use_rle == 0) ||
	    (mdev->agreed_pro_version < 90))
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
	memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how much plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				DCBP_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			DCBP_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
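/*
 * Worked example (illustrative): a bitmap region consisting of 5 clear
 * bits, then 3 set bits, then 7 clear bits starts with a cleared bit,
 * so the encoder stores DCBP_set_start(p, 0) and then the VLI-coded run
 * lengths 5, 3, 7.  Only the alternating run lengths travel on the
 * wire; the bit values are implied by the toggle.
 */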
enum { OK, FAILED, DONE }
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
        struct p_header80 *h, struct bm_xfer_ctx *c)
{
        struct p_compressed_bm *p = (void*)h;
        unsigned long num_words;
        int len;
        int ok;

        len = fill_bitmap_rle_bits(mdev, p, c);

        if (len < 0)
                return FAILED;

        if (len) {
                DCBP_set_code(p, RLE_VLI_Bits);
                ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
                        sizeof(*p) + len, 0);

                c->packets[0]++;
                c->bytes[0] += sizeof(*p) + len;

                if (c->bit_offset >= c->bm_bits)
                        len = 0; /* DONE */
        } else {
                /* was not compressible.
                 * send a buffer full of plain text bits instead. */
                num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
                len = num_words * sizeof(long);
                if (len)
                        drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
                ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
                                   h, sizeof(struct p_header80) + len, 0);
                c->word_offset += num_words;
                c->bit_offset = c->word_offset * BITS_PER_LONG;

                c->packets[1]++;
                c->bytes[1] += sizeof(struct p_header80) + len;

                if (c->bit_offset > c->bm_bits)
                        c->bit_offset = c->bm_bits;
        }
        ok = ok ? ((len == 0) ? DONE : OK) : FAILED;

        if (ok == DONE)
                INFO_bm_xfer_stats(mdev, "send", c);
        return ok;
}
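
/*
 * Illustrative sketch (not part of the driver): the fallback decision in
 * fill_bitmap_rle_bits() is a plain break-even test - the RLE/VLI code
 * string is kept only if it covers more plain bits than its own encoded
 * length in bits (len bytes == len << 3 bits); otherwise a plain bitmap
 * chunk is cheaper and is sent instead, as above.
 */
#if 0
/* 1 if a code string of code_bytes bytes beats transferring plain_bits */
static int rle_worth_it(unsigned long plain_bits, unsigned code_bytes)
{
        return plain_bits >= ((unsigned long)code_bytes << 3);
}
#endif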
/* See the comment at receive_bitmap() */
int _drbd_send_bitmap(struct drbd_conf *mdev)
{
        struct bm_xfer_ctx c;
        struct p_header80 *p;
        int ret;

        ERR_IF(!mdev->bitmap) return FALSE;

        /* maybe we should use some per thread scratch page,
         * and allocate that during initial device creation? */
        p = (struct p_header80 *) __get_free_page(GFP_NOIO);
        if (!p) {
                dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
                return FALSE;
        }

        if (get_ldev(mdev)) {
                if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
                        dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
                        drbd_bm_set_all(mdev);
                        if (drbd_bm_write(mdev)) {
                                /* write_bm did fail! Leave full sync flag set in Meta P_DATA
                                 * but otherwise process as per normal - need to tell other
                                 * side that a full resync is required! */
                                dev_err(DEV, "Failed to write bitmap to disk!\n");
                        } else {
                                drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
                                drbd_md_sync(mdev);
                        }
                }
                put_ldev(mdev);
        }

        c = (struct bm_xfer_ctx) {
                .bm_bits = drbd_bm_bits(mdev),
                .bm_words = drbd_bm_words(mdev),
        };

        do {
                ret = send_bitmap_rle_or_plain(mdev, p, &c);
        } while (ret == OK);

        free_page((unsigned long) p);
        return (ret == DONE);
}
int drbd_send_bitmap(struct drbd_conf *mdev)
{
        int err;

        if (!drbd_get_data_sock(mdev))
                return -1;
        err = !_drbd_send_bitmap(mdev);
        drbd_put_data_sock(mdev);
        return err;
}
int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
        int ok;
        struct p_barrier_ack p;

        p.barrier  = barrier_nr;
        p.set_size = cpu_to_be32(set_size);

        if (mdev->state.conn < C_CONNECTED)
                return FALSE;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
                        (struct p_header80 *)&p, sizeof(p));
        return ok;
}
/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:       DRBD device.
 * @cmd:        Packet command code.
 * @sector:     sector, needs to be in big endian byte order
 * @blksize:    size in byte, needs to be in big endian byte order
 * @block_id:   Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
                          u64 sector, u32 blksize, u64 block_id)
{
        int ok;
        struct p_block_ack p;

        p.sector   = sector;
        p.block_id = block_id;
        p.blksize  = blksize;
        p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));

        if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
                return FALSE;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
                                (struct p_header80 *)&p, sizeof(p));
        return ok;
}
int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
                     struct p_data *dp)
{
        const int header_size = sizeof(struct p_data)
                              - sizeof(struct p_header80);
        int data_size = ((struct p_header80 *)dp)->length - header_size;

        return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
                              dp->block_id);
}
int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
                     struct p_block_req *rp)
{
        return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:       DRBD device.
 * @cmd:        Packet command code.
 * @e:          Epoch entry.
 */
int drbd_send_ack(struct drbd_conf *mdev,
        enum drbd_packets cmd, struct drbd_epoch_entry *e)
{
        return _drbd_send_ack(mdev, cmd,
                              cpu_to_be64(e->sector),
                              cpu_to_be32(e->size),
                              e->block_id);
}
/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
                     sector_t sector, int blksize, u64 block_id)
{
        return _drbd_send_ack(mdev, cmd,
                              cpu_to_be64(sector),
                              cpu_to_be32(blksize),
                              cpu_to_be64(block_id));
}
int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
                       sector_t sector, int size, u64 block_id)
{
        int ok;
        struct p_block_req p;

        p.sector   = cpu_to_be64(sector);
        p.block_id = block_id;
        p.blksize  = cpu_to_be32(size);

        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
                                (struct p_header80 *)&p, sizeof(p));
        return ok;
}
int drbd_send_drequest_csum(struct drbd_conf *mdev,
                            sector_t sector, int size,
                            void *digest, int digest_size,
                            enum drbd_packets cmd)
{
        int ok;
        struct p_block_req p;

        p.sector   = cpu_to_be64(sector);
        p.block_id = BE_DRBD_MAGIC + 0xbeef;
        p.blksize  = cpu_to_be32(size);

        p.head.magic   = BE_DRBD_MAGIC;
        p.head.command = cpu_to_be16(cmd);
        p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);

        mutex_lock(&mdev->data.mutex);

        ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
        ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));

        mutex_unlock(&mdev->data.mutex);

        return ok;
}
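
/*
 * Illustrative sketch (not part of the driver): the digest travels as a
 * trailing payload after the fixed request, and head.length counts every
 * byte that follows the header - which is why digest_size is added above.
 * The framing arithmetic, with a hypothetical stand-in header type:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

struct hdr80 {                  /* stand-in for struct p_header80 */
        uint32_t magic;
        uint16_t command;
        uint16_t length;        /* bytes following this header */
};

/* wire length field for a request of fixed_size bytes plus a digest */
static uint16_t wire_length(size_t fixed_size, int digest_size)
{
        return (uint16_t)(fixed_size - sizeof(struct hdr80) + digest_size);
}
#endif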
int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
        int ok;
        struct p_block_req p;

        p.sector   = cpu_to_be64(sector);
        p.block_id = BE_DRBD_MAGIC + 0xbabe;
        p.blksize  = cpu_to_be32(size);

        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
                           (struct p_header80 *)&p, sizeof(p));
        return ok;
}
/* called on sndtimeo
 * returns FALSE if we should retry,
 * TRUE if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
{
        int drop_it;
        /* long elapsed = (long)(jiffies - mdev->last_received); */

        drop_it =   mdev->meta.socket == sock
                || !mdev->asender.task
                || get_t_state(&mdev->asender) != Running
                || mdev->state.conn < C_CONNECTED;

        if (drop_it)
                return TRUE;

        drop_it = !--mdev->ko_count;
        if (!drop_it) {
                dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
                       current->comm, current->pid, mdev->ko_count);
                request_ping(mdev);
        }

        return drop_it; /* && (mdev->state == R_PRIMARY) */;
}
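
/*
 * Illustrative sketch (not part of the driver): ko_count is a "knock out"
 * countdown - it is reloaded from the configuration before each send, and
 * every send timeout decrements it; only when it hits zero is the peer
 * declared dead.  The control flow in miniature:
 */
#if 0
/* 1 when timeouts_seen consecutive timeouts exhaust the configured count */
static int would_drop(unsigned ko_count_conf, unsigned timeouts_seen)
{
        unsigned ko = ko_count_conf;    /* reloaded per send attempt */

        while (timeouts_seen--) {
                if (!--ko)
                        return 1;       /* give up, peer looks dead */
        }
        return 0;                       /* keep retrying */
}
#endif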
/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
                   int offset, size_t size, unsigned msg_flags)
{
        int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
        kunmap(page);
        if (sent == size)
                mdev->send_cnt += size>>9;
        return sent == size;
}
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
                    int offset, size_t size, unsigned msg_flags)
{
        mm_segment_t oldfs = get_fs();
        int sent, ok;
        int len = size;

        /* e.g. XFS meta- & log-data is in slab pages, which have a
         * page_count of 0 and/or have PageSlab() set.
         * we cannot use send_page for those, as that does get_page();
         * put_page(); and would cause either a VM_BUG directly, or
         * __page_cache_release a page that would actually still be referenced
         * by someone, leading to some obscure delayed Oops somewhere else. */
        if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
                return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

        msg_flags |= MSG_NOSIGNAL;
        drbd_update_congested(mdev);
        set_fs(KERNEL_DS);
        do {
                sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
                                                        offset, len,
                                                        msg_flags);
                if (sent == -EAGAIN) {
                        if (we_should_drop_the_connection(mdev,
                                                          mdev->data.socket))
                                break;
                        else
                                continue;
                }
                if (sent <= 0) {
                        dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
                             __func__, (int)size, len, sent);
                        break;
                }
                len    -= sent;
                offset += sent;
        } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
        set_fs(oldfs);
        clear_bit(NET_CONGESTED, &mdev->flags);

        ok = (len == 0);
        if (likely(ok))
                mdev->send_cnt += size>>9;
        return ok;
}
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /* hint all but last page with MSG_MORE */
        __bio_for_each_segment(bvec, bio, i, 0) {
                if (!_drbd_no_send_page(mdev, bvec->bv_page,
                                     bvec->bv_offset, bvec->bv_len,
                                     i == bio->bi_vcnt - 1 ? 0 : MSG_MORE))
                        return 0;
        }
        return 1;
}
static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
        struct bio_vec *bvec;
        int i;

        /* hint all but last page with MSG_MORE */
        __bio_for_each_segment(bvec, bio, i, 0) {
                if (!_drbd_send_page(mdev, bvec->bv_page,
                                     bvec->bv_offset, bvec->bv_len,
                                     i == bio->bi_vcnt - 1 ? 0 : MSG_MORE))
                        return 0;
        }
        return 1;
}
static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
        struct page *page = e->pages;
        unsigned len = e->size;

        /* hint all but last page with MSG_MORE */
        page_chain_for_each(page) {
                unsigned l = min_t(unsigned, len, PAGE_SIZE);
                if (!_drbd_send_page(mdev, page, 0, l,
                                page_chain_next(page) ? MSG_MORE : 0))
                        return 0;
                len -= l;
        }
        return 1;
}
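
/*
 * Illustrative sketch (userspace, not part of the driver): MSG_MORE on
 * every fragment but the last tells TCP that more data follows, so the
 * stack may coalesce the fragments instead of flushing one small segment
 * per page.  The same pattern over a plain array of buffers, assuming a
 * connected socket fd:
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fragments(int fd, const struct iovec *frag, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                /* hint all but the last fragment with MSG_MORE */
                int flags = (i == n - 1) ? 0 : MSG_MORE;
                if (send(fd, frag[i].iov_base, frag[i].iov_len, flags) < 0)
                        return -1;
        }
        return 0;
}
#endif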
/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
        int ok = 1;
        struct p_data p;
        unsigned int dp_flags = 0;
        void *dgb;
        int dgs;

        if (!drbd_get_data_sock(mdev))
                return 0;

        dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
                crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;

        if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
                p.head.h80.magic   = BE_DRBD_MAGIC;
                p.head.h80.command = cpu_to_be16(P_DATA);
                p.head.h80.length  =
                        cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
        } else {
                p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
                p.head.h95.command = cpu_to_be16(P_DATA);
                p.head.h95.length  =
                        cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
        }

        p.sector   = cpu_to_be64(req->sector);
        p.block_id = (unsigned long)req;
        p.seq_num  = cpu_to_be32(req->seq_num =
                                 atomic_add_return(1, &mdev->packet_seq));

        /* NOTE: no need to check if barriers supported here as we would
         *       not pass the test in make_request_common in that case
         */
        if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
                dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
                /* dp_flags |= DP_HARDBARRIER; */
        }
        if (req->master_bio->bi_rw & REQ_SYNC)
                dp_flags |= DP_RW_SYNC;
        /* for now handle SYNCIO and UNPLUG
         * as if they still were one and the same flag */
        if (req->master_bio->bi_rw & REQ_UNPLUG)
                dp_flags |= DP_RW_SYNC;
        if (mdev->state.conn >= C_SYNC_SOURCE &&
            mdev->state.conn <= C_PAUSED_SYNC_T)
                dp_flags |= DP_MAY_SET_IN_SYNC;

        p.dp_flags = cpu_to_be32(dp_flags);
        set_bit(UNPLUG_REMOTE, &mdev->flags);
        ok = (sizeof(p) ==
                drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
        if (ok && dgs) {
                dgb = mdev->int_dig_out;
                drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
                ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
        }
        if (ok) {
                if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
                        ok = _drbd_send_bio(mdev, req->master_bio);
                else
                        ok = _drbd_send_zc_bio(mdev, req->master_bio);
        }

        drbd_put_data_sock(mdev);

        return ok;
}
/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
                    struct drbd_epoch_entry *e)
{
        int ok;
        struct p_data p;
        void *dgb;
        int dgs;

        dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
                crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;

        if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
                p.head.h80.magic   = BE_DRBD_MAGIC;
                p.head.h80.command = cpu_to_be16(cmd);
                p.head.h80.length  =
                        cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
        } else {
                p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
                p.head.h95.command = cpu_to_be16(cmd);
                p.head.h95.length  =
                        cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
        }

        p.sector   = cpu_to_be64(e->sector);
        p.block_id = e->block_id;
        /* p.seq_num  = 0;    No sequence numbers here.. */

        /* Only called by our kernel thread.
         * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
         * in response to admin command or module unload.
         */
        if (!drbd_get_data_sock(mdev))
                return 0;

        ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
        if (ok && dgs) {
                dgb = mdev->int_dig_out;
                drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
                ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
        }
        if (ok)
                ok = _drbd_send_zc_ee(mdev, e);

        drbd_put_data_sock(mdev);

        return ok;
}
/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_conf *mdev, struct socket *sock,
              void *buf, size_t size, unsigned msg_flags)
{
        struct kvec iov;
        struct msghdr msg;
        int rv, sent = 0;

        if (!sock)
                return -1000;

        /* THINK  if (signal_pending) return ... ? */

        iov.iov_base = buf;
        iov.iov_len  = size;

        msg.msg_name       = NULL;
        msg.msg_namelen    = 0;
        msg.msg_control    = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

        if (sock == mdev->data.socket) {
                mdev->ko_count = mdev->net_conf->ko_count;
                drbd_update_congested(mdev);
        }
        do {
                /* STRANGE
                 * tcp_sendmsg does _not_ use its size parameter at all ?
                 *
                 * -EAGAIN on timeout, -EINTR on signal.
                 */
/* THINK
 * do we need to block DRBD_SIG if sock == &meta.socket ??
 * otherwise wake_asender() might interrupt some send_*Ack !
 */
                rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
                if (rv == -EAGAIN) {
                        if (we_should_drop_the_connection(mdev, sock))
                                break;
                        else
                                continue;
                }
                D_ASSERT(rv != 0);
                if (rv == -EINTR) {
                        flush_signals(current);
                        rv = 0;
                }
                if (rv < 0)
                        break;
                sent += rv;
                iov.iov_base += rv;
                iov.iov_len  -= rv;
        } while (sent < size);

        if (sock == mdev->data.socket)
                clear_bit(NET_CONGESTED, &mdev->flags);

        if (rv <= 0) {
                if (rv != -EAGAIN) {
                        dev_err(DEV, "%s_sendmsg returned %d\n",
                            sock == mdev->meta.socket ? "msock" : "sock",
                            rv);
                        drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
                } else
                        drbd_force_state(mdev, NS(conn, C_TIMEOUT));
        }

        return sent;
}
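
/*
 * Illustrative sketch (userspace, not part of the driver): like
 * kernel_sendmsg() above, send(2) may accept only part of the buffer, so
 * a correct sender loops and advances by whatever was accepted:
 */
#if 0
#include <sys/socket.h>
#include <errno.h>

static ssize_t send_all(int fd, const char *buf, size_t size)
{
        size_t sent = 0;

        while (sent < size) {
                ssize_t rv = send(fd, buf + sent, size - sent, MSG_NOSIGNAL);
                if (rv < 0) {
                        if (errno == EINTR)
                                continue;       /* cf. flush_signals() above */
                        return -1;
                }
                sent += rv;
        }
        return sent;
}
#endif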
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
        struct drbd_conf *mdev = bdev->bd_disk->private_data;
        unsigned long flags;
        int rv = 0;

        lock_kernel();
        spin_lock_irqsave(&mdev->req_lock, flags);
        /* to have a stable mdev->state.role
         * and no race with updating open_cnt */

        if (mdev->state.role != R_PRIMARY) {
                if (mode & FMODE_WRITE)
                        rv = -EROFS;
                else if (!allow_oos)
                        rv = -EMEDIUMTYPE;
        }

        if (!rv)
                mdev->open_cnt++;
        spin_unlock_irqrestore(&mdev->req_lock, flags);
        unlock_kernel();

        return rv;
}
static int drbd_release(struct gendisk *gd, fmode_t mode)
{
        struct drbd_conf *mdev = gd->private_data;

        lock_kernel();
        mdev->open_cnt--;
        unlock_kernel();
        return 0;
}
static void drbd_unplug_fn(struct request_queue *q)
{
        struct drbd_conf *mdev = q->queuedata;

        /* unplug FIRST */
        spin_lock_irq(q->queue_lock);
        blk_remove_plug(q);
        spin_unlock_irq(q->queue_lock);

        /* only if connected */
        spin_lock_irq(&mdev->req_lock);
        if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
                D_ASSERT(mdev->state.role == R_PRIMARY);
                if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
                        /* add to the data.work queue,
                         * unless already queued.
                         * XXX this might be a good addition to drbd_queue_work
                         * anyways, to detect "double queuing" ... */
                        if (list_empty(&mdev->unplug_work.list))
                                drbd_queue_work(&mdev->data.work,
                                                &mdev->unplug_work);
                }
        }
        spin_unlock_irq(&mdev->req_lock);

        if (mdev->state.disk >= D_INCONSISTENT)
                drbd_kick_lo(mdev);
}
static void drbd_set_defaults(struct drbd_conf *mdev)
{
        /* This way we get a compile error when sync_conf grows,
           and we forgot to initialize it here */
        mdev->sync_conf = (struct syncer_conf) {
                /* .rate = */           DRBD_RATE_DEF,
                /* .after = */          DRBD_AFTER_DEF,
                /* .al_extents = */     DRBD_AL_EXTENTS_DEF,
                /* .verify_alg = */     {}, 0,
                /* .cpu_mask = */       {}, 0,
                /* .csums_alg = */      {}, 0,
                /* .use_rle = */        0,
                /* .on_no_data = */     DRBD_ON_NO_DATA_DEF,
                /* .c_plan_ahead = */   DRBD_C_PLAN_AHEAD_DEF,
                /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
                /* .c_fill_target = */  DRBD_C_FILL_TARGET_DEF,
                /* .c_max_rate = */     DRBD_C_MAX_RATE_DEF,
                /* .c_min_rate = */     DRBD_C_MIN_RATE_DEF
        };

        /* Have to use that way, because the layout differs between
           big endian and little endian */
        mdev->state = (union drbd_state) {
                { .role = R_SECONDARY,
                  .peer = R_UNKNOWN,
                  .conn = C_STANDALONE,
                  .disk = D_DISKLESS,
                  .pdsk = D_UNKNOWN
                } };
}
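
/*
 * Illustrative sketch (not part of the driver): initializing every member
 * positionally, with the field names kept only in comments as above, makes
 * the compiler object when the struct and the list drift apart - an extra
 * entry is a hard "excess elements" error, a missing one can be caught
 * with -Wmissing-field-initializers:
 */
#if 0
struct demo_conf {              /* hypothetical miniature of syncer_conf */
        int rate;
        int after;
        int al_extents;
};

static const struct demo_conf demo_defaults = {
        /* .rate = */           250,
        /* .after = */          -1,
        /* .al_extents = */     127,
};
#endif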
void drbd_init_set_defaults(struct drbd_conf *mdev)
{
        /* the memset(,0,) did most of this.
         * note: only assignments, no allocation in here */

        drbd_set_defaults(mdev);

        /* for now, we do NOT yet support it,
         * even though we start some framework
         * to eventually support barriers */
        set_bit(NO_BARRIER_SUPP, &mdev->flags);

        atomic_set(&mdev->ap_bio_cnt, 0);
        atomic_set(&mdev->ap_pending_cnt, 0);
        atomic_set(&mdev->rs_pending_cnt, 0);
        atomic_set(&mdev->unacked_cnt, 0);
        atomic_set(&mdev->local_cnt, 0);
        atomic_set(&mdev->net_cnt, 0);
        atomic_set(&mdev->packet_seq, 0);
        atomic_set(&mdev->pp_in_use, 0);
        atomic_set(&mdev->rs_sect_in, 0);
        atomic_set(&mdev->rs_sect_ev, 0);

        mutex_init(&mdev->md_io_mutex);
        mutex_init(&mdev->data.mutex);
        mutex_init(&mdev->meta.mutex);
        sema_init(&mdev->data.work.s, 0);
        sema_init(&mdev->meta.work.s, 0);
        mutex_init(&mdev->state_mutex);

        spin_lock_init(&mdev->data.work.q_lock);
        spin_lock_init(&mdev->meta.work.q_lock);

        spin_lock_init(&mdev->al_lock);
        spin_lock_init(&mdev->req_lock);
        spin_lock_init(&mdev->peer_seq_lock);
        spin_lock_init(&mdev->epoch_lock);

        INIT_LIST_HEAD(&mdev->active_ee);
        INIT_LIST_HEAD(&mdev->sync_ee);
        INIT_LIST_HEAD(&mdev->done_ee);
        INIT_LIST_HEAD(&mdev->read_ee);
        INIT_LIST_HEAD(&mdev->net_ee);
        INIT_LIST_HEAD(&mdev->resync_reads);
        INIT_LIST_HEAD(&mdev->data.work.q);
        INIT_LIST_HEAD(&mdev->meta.work.q);
        INIT_LIST_HEAD(&mdev->resync_work.list);
        INIT_LIST_HEAD(&mdev->unplug_work.list);
        INIT_LIST_HEAD(&mdev->md_sync_work.list);
        INIT_LIST_HEAD(&mdev->bm_io_work.w.list);

        mdev->resync_work.cb  = w_resync_inactive;
        mdev->unplug_work.cb  = w_send_write_hint;
        mdev->md_sync_work.cb = w_md_sync;
        mdev->bm_io_work.w.cb = w_bitmap_io;
        init_timer(&mdev->resync_timer);
        init_timer(&mdev->md_sync_timer);
        mdev->resync_timer.function = resync_timer_fn;
        mdev->resync_timer.data = (unsigned long) mdev;
        mdev->md_sync_timer.function = md_sync_timer_fn;
        mdev->md_sync_timer.data = (unsigned long) mdev;

        init_waitqueue_head(&mdev->misc_wait);
        init_waitqueue_head(&mdev->state_wait);
        init_waitqueue_head(&mdev->net_cnt_wait);
        init_waitqueue_head(&mdev->ee_wait);
        init_waitqueue_head(&mdev->al_wait);
        init_waitqueue_head(&mdev->seq_wait);

        drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
        drbd_thread_init(mdev, &mdev->worker, drbd_worker);
        drbd_thread_init(mdev, &mdev->asender, drbd_asender);

        mdev->agreed_pro_version = PRO_VERSION_MAX;
        mdev->write_ordering = WO_bio_barrier;
        mdev->resync_wenr = LC_FREE;
}
void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
        int i;

        if (mdev->receiver.t_state != None)
                dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
                                mdev->receiver.t_state);

        /* no need to lock it, I'm the only thread alive */
        if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
                dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
        mdev->al_writ_cnt  =
        mdev->bm_writ_cnt  =
        mdev->read_cnt     =
        mdev->recv_cnt     =
        mdev->send_cnt     =
        mdev->writ_cnt     =
        mdev->p_size       =
        mdev->rs_start     =
        mdev->rs_total     =
        mdev->rs_failed    = 0;
        mdev->rs_last_events = 0;
        mdev->rs_last_sect_ev = 0;
        for (i = 0; i < DRBD_SYNC_MARKS; i++) {
                mdev->rs_mark_left[i] = 0;
                mdev->rs_mark_time[i] = 0;
        }
        D_ASSERT(mdev->net_conf == NULL);

        drbd_set_my_capacity(mdev, 0);
        if (mdev->bitmap) {
                /* maybe never allocated. */
                drbd_bm_resize(mdev, 0, 1);
                drbd_bm_cleanup(mdev);
        }

        drbd_free_resources(mdev);
        clear_bit(AL_SUSPENDED, &mdev->flags);

        /*
         * currently we drbd_init_ee only on module load, so
         * we may do drbd_release_ee only on module unload!
         */
        D_ASSERT(list_empty(&mdev->active_ee));
        D_ASSERT(list_empty(&mdev->sync_ee));
        D_ASSERT(list_empty(&mdev->done_ee));
        D_ASSERT(list_empty(&mdev->read_ee));
        D_ASSERT(list_empty(&mdev->net_ee));
        D_ASSERT(list_empty(&mdev->resync_reads));
        D_ASSERT(list_empty(&mdev->data.work.q));
        D_ASSERT(list_empty(&mdev->meta.work.q));
        D_ASSERT(list_empty(&mdev->resync_work.list));
        D_ASSERT(list_empty(&mdev->unplug_work.list));

        drbd_set_defaults(mdev);
}
static void drbd_destroy_mempools(void)
{
        struct page *page;

        while (drbd_pp_pool) {
                page = drbd_pp_pool;
                drbd_pp_pool = (struct page *)page_private(page);
                __free_page(page);
                drbd_pp_vacant--;
        }

        /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */

        if (drbd_ee_mempool)
                mempool_destroy(drbd_ee_mempool);
        if (drbd_request_mempool)
                mempool_destroy(drbd_request_mempool);
        if (drbd_ee_cache)
                kmem_cache_destroy(drbd_ee_cache);
        if (drbd_request_cache)
                kmem_cache_destroy(drbd_request_cache);
        if (drbd_bm_ext_cache)
                kmem_cache_destroy(drbd_bm_ext_cache);
        if (drbd_al_ext_cache)
                kmem_cache_destroy(drbd_al_ext_cache);

        drbd_ee_mempool      = NULL;
        drbd_request_mempool = NULL;
        drbd_ee_cache        = NULL;
        drbd_request_cache   = NULL;
        drbd_bm_ext_cache    = NULL;
        drbd_al_ext_cache    = NULL;
}
static int drbd_create_mempools(void)
{
        struct page *page;
        const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
        int i;

        /* prepare our caches and mempools */
        drbd_request_mempool = NULL;
        drbd_ee_cache        = NULL;
        drbd_request_cache   = NULL;
        drbd_bm_ext_cache    = NULL;
        drbd_al_ext_cache    = NULL;
        drbd_pp_pool         = NULL;

        /* caches */
        drbd_request_cache = kmem_cache_create(
                "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
        if (drbd_request_cache == NULL)
                goto Enomem;

        drbd_ee_cache = kmem_cache_create(
                "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
        if (drbd_ee_cache == NULL)
                goto Enomem;

        drbd_bm_ext_cache = kmem_cache_create(
                "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
        if (drbd_bm_ext_cache == NULL)
                goto Enomem;

        drbd_al_ext_cache = kmem_cache_create(
                "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
        if (drbd_al_ext_cache == NULL)
                goto Enomem;

        /* mempools */
        drbd_request_mempool = mempool_create(number,
                mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
        if (drbd_request_mempool == NULL)
                goto Enomem;

        drbd_ee_mempool = mempool_create(number,
                mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
        if (drbd_ee_mempool == NULL)
                goto Enomem;

        /* drbd's page pool */
        spin_lock_init(&drbd_pp_lock);

        for (i = 0; i < number; i++) {
                page = alloc_page(GFP_HIGHUSER);
                if (!page)
                        goto Enomem;
                set_page_private(page, (unsigned long)drbd_pp_pool);
                drbd_pp_pool = page;
        }
        drbd_pp_vacant = number;

        return 0;

Enomem:
        drbd_destroy_mempools(); /* in case we allocated some */
        return -ENOMEM;
}
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
        void *unused)
{
        /* just so we have it.  you never know what interesting things we
         * might want to do here some day...
         */

        return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
        .notifier_call = drbd_notify_sys,
};
static void drbd_release_ee_lists(struct drbd_conf *mdev)
{
        int rr;

        rr = drbd_release_ee(mdev, &mdev->active_ee);
        if (rr)
                dev_err(DEV, "%d EEs in active list found!\n", rr);

        rr = drbd_release_ee(mdev, &mdev->sync_ee);
        if (rr)
                dev_err(DEV, "%d EEs in sync list found!\n", rr);

        rr = drbd_release_ee(mdev, &mdev->read_ee);
        if (rr)
                dev_err(DEV, "%d EEs in read list found!\n", rr);

        rr = drbd_release_ee(mdev, &mdev->done_ee);
        if (rr)
                dev_err(DEV, "%d EEs in done list found!\n", rr);

        rr = drbd_release_ee(mdev, &mdev->net_ee);
        if (rr)
                dev_err(DEV, "%d EEs in net list found!\n", rr);
}
/* caution. no locking.
 * currently only used from module cleanup code. */
static void drbd_delete_device(unsigned int minor)
{
        struct drbd_conf *mdev = minor_to_mdev(minor);

        if (!mdev)
                return;

        /* paranoia asserts */
        if (mdev->open_cnt != 0)
                dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
                                __FILE__ , __LINE__);

        ERR_IF (!list_empty(&mdev->data.work.q)) {
                struct list_head *lp;
                list_for_each(lp, &mdev->data.work.q) {
                        dev_err(DEV, "lp = %p\n", lp);
                }
        };
        /* end paranoia asserts */

        del_gendisk(mdev->vdisk);

        /* cleanup stuff that may have been allocated during
         * device (re-)configuration or state changes */

        if (mdev->this_bdev)
                bdput(mdev->this_bdev);

        drbd_free_resources(mdev);

        drbd_release_ee_lists(mdev);

        /* should be free'd on disconnect? */
        kfree(mdev->ee_hash);
        /*
        mdev->ee_hash_s = 0;
        mdev->ee_hash = NULL;
        */

        lc_destroy(mdev->act_log);
        lc_destroy(mdev->resync);

        kfree(mdev->p_uuid);
        /* mdev->p_uuid = NULL; */

        kfree(mdev->int_dig_out);
        kfree(mdev->int_dig_in);
        kfree(mdev->int_dig_vv);

        /* cleanup the rest that has been
         * allocated from drbd_new_device
         * and actually free the mdev itself */
        drbd_free_mdev(mdev);
}
static void drbd_cleanup(void)
{
        unsigned int i;

        unregister_reboot_notifier(&drbd_notifier);

        drbd_nl_cleanup();

        if (minor_table) {
                if (drbd_proc)
                        remove_proc_entry("drbd", NULL);
                i = minor_count;
                while (i--)
                        drbd_delete_device(i);
                drbd_destroy_mempools();
        }

        kfree(minor_table);

        unregister_blkdev(DRBD_MAJOR, "drbd");

        printk(KERN_INFO "drbd: module cleanup done.\n");
}
/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:     User data
 * @bdi_bits:           Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
        struct drbd_conf *mdev = congested_data;
        struct request_queue *q;
        char reason = '-';
        int r = 0;

        if (!__inc_ap_bio_cond(mdev)) {
                /* DRBD has frozen IO */
                r = bdi_bits;
                reason = 'd';
                goto out;
        }

        if (get_ldev(mdev)) {
                q = bdev_get_queue(mdev->ldev->backing_bdev);
                r = bdi_congested(&q->backing_dev_info, bdi_bits);
                put_ldev(mdev);
                if (r)
                        reason = 'b';
        }

        if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
                r |= (1 << BDI_async_congested);
                reason = reason == 'b' ? 'a' : 'n';
        }

out:
        mdev->congestion_reason = reason;
        return r;
}
struct drbd_conf *drbd_new_device(unsigned int minor)
{
        struct drbd_conf *mdev;
        struct gendisk *disk;
        struct request_queue *q;

        /* GFP_KERNEL, we are outside of all write-out paths */
        mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
        if (!mdev)
                return NULL;
        if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
                goto out_no_cpumask;

        mdev->minor = minor;

        drbd_init_set_defaults(mdev);

        q = blk_alloc_queue(GFP_KERNEL);
        if (!q)
                goto out_no_q;
        mdev->rq_queue = q;
        q->queuedata   = mdev;

        disk = alloc_disk(1);
        if (!disk)
                goto out_no_disk;
        mdev->vdisk = disk;

        set_disk_ro(disk, TRUE);

        disk->queue = q;
        disk->major = DRBD_MAJOR;
        disk->first_minor = minor;
        disk->fops = &drbd_ops;
        sprintf(disk->disk_name, "drbd%d", minor);
        disk->private_data = mdev;

        mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
        /* we have no partitions. we contain only ourselves. */
        mdev->this_bdev->bd_contains = mdev->this_bdev;

        q->backing_dev_info.congested_fn = drbd_congested;
        q->backing_dev_info.congested_data = mdev;

        blk_queue_make_request(q, drbd_make_request_26);
        blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
        blk_queue_merge_bvec(q, drbd_merge_bvec);
        q->queue_lock = &mdev->req_lock; /* needed since we use */
                /* plugging on a queue, that actually has no requests! */
        q->unplug_fn = drbd_unplug_fn;

        mdev->md_io_page = alloc_page(GFP_KERNEL);
        if (!mdev->md_io_page)
                goto out_no_io_page;

        if (drbd_bm_init(mdev))
                goto out_no_bitmap;
        /* no need to lock access, we are still initializing this minor device. */
        if (!tl_init(mdev))
                goto out_no_tl;

        mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
        if (!mdev->app_reads_hash)
                goto out_no_app_reads;

        mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
        if (!mdev->current_epoch)
                goto out_no_epoch;

        INIT_LIST_HEAD(&mdev->current_epoch->list);
        mdev->epochs = 1;

        return mdev;

/* out_whatever_else:
        kfree(mdev->current_epoch); */
out_no_epoch:
        kfree(mdev->app_reads_hash);
out_no_app_reads:
        tl_cleanup(mdev);
out_no_tl:
        drbd_bm_cleanup(mdev);
out_no_bitmap:
        __free_page(mdev->md_io_page);
out_no_io_page:
        put_disk(disk);
out_no_disk:
        blk_cleanup_queue(q);
out_no_q:
        free_cpumask_var(mdev->cpu_mask);
out_no_cpumask:
        kfree(mdev);
        return NULL;
}
/* counterpart of drbd_new_device.
 * last part of drbd_delete_device. */
void drbd_free_mdev(struct drbd_conf *mdev)
{
        kfree(mdev->current_epoch);
        kfree(mdev->app_reads_hash);
        tl_cleanup(mdev);
        if (mdev->bitmap) /* should no longer be there. */
                drbd_bm_cleanup(mdev);
        __free_page(mdev->md_io_page);
        put_disk(mdev->vdisk);
        blk_cleanup_queue(mdev->rq_queue);
        free_cpumask_var(mdev->cpu_mask);
        kfree(mdev);
}
int __init drbd_init(void)
{
        int err;

        if (sizeof(struct p_handshake) != 80) {
                printk(KERN_ERR
                       "drbd: never change the size or layout "
                       "of the HandShake packet.\n");
                return -EINVAL;
        }

        if (1 > minor_count || minor_count > 255) {
                printk(KERN_ERR
                        "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
                return -EINVAL;
#else
                minor_count = 8;
#endif
        }

        err = drbd_nl_init();
        if (err)
                return err;

        err = register_blkdev(DRBD_MAJOR, "drbd");
        if (err) {
                printk(KERN_ERR
                       "drbd: unable to register block device major %d\n",
                       DRBD_MAJOR);
                return err;
        }

        register_reboot_notifier(&drbd_notifier);

        /*
         * allocate all necessary structs
         */
        err = -ENOMEM;

        init_waitqueue_head(&drbd_pp_wait);

        drbd_proc = NULL; /* play safe for drbd_cleanup */
        minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
                                GFP_KERNEL);
        if (!minor_table)
                goto Enomem;

        err = drbd_create_mempools();
        if (err)
                goto Enomem;

        drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
        if (!drbd_proc) {
                printk(KERN_ERR "drbd: unable to register proc file\n");
                goto Enomem;
        }

        rwlock_init(&global_state_lock);

        printk(KERN_INFO "drbd: initialized. "
               "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
               API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
        printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
        printk(KERN_INFO "drbd: registered as block device major %d\n",
                DRBD_MAJOR);
        printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);

        return 0; /* Success! */

Enomem:
        drbd_cleanup();
        if (err == -ENOMEM)
                /* currently always the case */
                printk(KERN_ERR "drbd: ran out of memory\n");
        else
                printk(KERN_ERR "drbd: initialization failure\n");
        return err;
}
void drbd_free_bc(struct drbd_backing_dev *ldev)
{
        if (ldev == NULL)
                return;

        bd_release(ldev->backing_bdev);
        bd_release(ldev->md_bdev);

        fput(ldev->lo_file);
        fput(ldev->md_file);

        kfree(ldev);
}
void drbd_free_sock(struct drbd_conf *mdev)
{
        if (mdev->data.socket) {
                mutex_lock(&mdev->data.mutex);
                kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
                sock_release(mdev->data.socket);
                mdev->data.socket = NULL;
                mutex_unlock(&mdev->data.mutex);
        }
        if (mdev->meta.socket) {
                mutex_lock(&mdev->meta.mutex);
                kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
                sock_release(mdev->meta.socket);
                mdev->meta.socket = NULL;
                mutex_unlock(&mdev->meta.mutex);
        }
}
void drbd_free_resources(struct drbd_conf *mdev)
{
        crypto_free_hash(mdev->csums_tfm);
        mdev->csums_tfm = NULL;
        crypto_free_hash(mdev->verify_tfm);
        mdev->verify_tfm = NULL;
        crypto_free_hash(mdev->cram_hmac_tfm);
        mdev->cram_hmac_tfm = NULL;
        crypto_free_hash(mdev->integrity_w_tfm);
        mdev->integrity_w_tfm = NULL;
        crypto_free_hash(mdev->integrity_r_tfm);
        mdev->integrity_r_tfm = NULL;

        drbd_free_sock(mdev);

        __no_warn(local,
                  drbd_free_bc(mdev->ldev);
                  mdev->ldev = NULL;);
}
/* meta data management */

struct meta_data_on_disk {
        u64 la_size;           /* last agreed size. */
        u64 uuid[UI_SIZE];     /* UUIDs. */
        u64 device_uuid;
        u64 reserved_u64_1;
        u32 flags;             /* MDF */
        u32 magic;
        u32 md_size_sect;
        u32 al_offset;         /* offset to this block */
        u32 al_nr_extents;     /* important for restoring the AL */
              /* `-- act_log->nr_elements <-- sync_conf.al_extents */
        u32 bm_offset;         /* offset to the bitmap, from here */
        u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
        u32 reserved_u32[4];

} __packed;
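
/*
 * Illustrative sketch (not part of the driver): an on-disk structure like
 * this must have an invariant layout, and a compile-time size assertion is
 * the usual guard.  With hypothetical sizes (a UI_SIZE of 4), the fixed
 * fields plus the reserved words stay well inside the single 512 byte
 * sector that drbd_md_sync() zeroes and writes:
 */
#if 0
#include <stdint.h>

#define DEMO_UI_SIZE 4

struct demo_md_on_disk {
        uint64_t la_size;
        uint64_t uuid[DEMO_UI_SIZE];
        uint64_t device_uuid;
        uint64_t reserved_u64_1;
        uint32_t flags;
        uint32_t magic;
        uint32_t md_size_sect;
        uint32_t al_offset;
        uint32_t al_nr_extents;
        uint32_t bm_offset;
        uint32_t bm_bytes_per_bit;
        uint32_t reserved_u32[4];
} __attribute__((packed));

/* fails to compile if the layout ever outgrows one 512 byte sector */
typedef char demo_md_fits_in_sector
        [sizeof(struct demo_md_on_disk) <= 512 ? 1 : -1];
#endif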
/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:       DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
        struct meta_data_on_disk *buffer;
        sector_t sector;
        int i;

        if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
                return;
        del_timer(&mdev->md_sync_timer);

        /* We use here D_FAILED and not D_ATTACHING because we try to write
         * metadata even if we detach due to a disk failure! */
        if (!get_ldev_if_state(mdev, D_FAILED))
                return;

        mutex_lock(&mdev->md_io_mutex);
        buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
        memset(buffer, 0, 512);

        buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
        buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
        buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);

        buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
        buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
        buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
        buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
        buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

        buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);

        D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
        sector = mdev->ldev->md.md_offset;

        if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
                /* this was a try anyways ... */
                dev_err(DEV, "meta data update failed!\n");
                drbd_chk_io_error(mdev, 1, TRUE);
        }

        /* Update mdev->ldev->md.la_size_sect,
         * since we updated it on metadata. */
        mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

        mutex_unlock(&mdev->md_io_mutex);
        put_ldev(mdev);
}
/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:       DRBD device.
 * @bdev:       Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case
 * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
        struct meta_data_on_disk *buffer;
        int i, rv = NO_ERROR;

        if (!get_ldev_if_state(mdev, D_ATTACHING))
                return ERR_IO_MD_DISK;

        mutex_lock(&mdev->md_io_mutex);
        buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);

        if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
                /* NOTE: can't do normal error processing here as this is
                   called BEFORE disk is attached */
                dev_err(DEV, "Error while reading metadata.\n");
                rv = ERR_IO_MD_DISK;
                goto err;
        }

        if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
                dev_err(DEV, "Error while reading metadata, magic not found.\n");
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
                dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
                    be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
                dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
                    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
                rv = ERR_MD_INVALID;
                goto err;
        }
        if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
                dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
                    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
                rv = ERR_MD_INVALID;
                goto err;
        }

        if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
                dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
                    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
                rv = ERR_MD_INVALID;
                goto err;
        }

        bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
        bdev->md.flags = be32_to_cpu(buffer->flags);
        mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
        bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

        if (mdev->sync_conf.al_extents < 7)
                mdev->sync_conf.al_extents = 127;

 err:
        mutex_unlock(&mdev->md_io_mutex);
        put_ldev(mdev);

        return rv;
}
/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:       DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
        set_bit(MD_DIRTY, &mdev->flags);
        mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
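
/*
 * Illustrative sketch (not part of the driver): MD_DIRTY plus the timer
 * implement a classic lazy write-back - writers only set a flag, and one
 * deadline guarantees the super block reaches the disk within the window
 * even if nobody calls drbd_md_sync() explicitly.  A single-threaded
 * miniature of the dirty/flush protocol, with hypothetical names:
 */
#if 0
struct demo_md {
        int dirty;
        unsigned long deadline;         /* jiffies-like tick count */
};

static void demo_mark_dirty(struct demo_md *md, unsigned long now)
{
        md->dirty = 1;
        md->deadline = now + 5;         /* flush within 5 ticks */
}

static void demo_tick(struct demo_md *md, unsigned long now,
                      void (*flush)(void))
{
        if (md->dirty && now >= md->deadline) {
                md->dirty = 0;
                flush();                /* cf. drbd_md_sync() */
        }
}
#endif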
static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
        int i;

        for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
                mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}
void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
        if (idx == UI_CURRENT) {
                if (mdev->state.role == R_PRIMARY)
                        val |= 1;
                else
                        val &= ~((u64)1);

                drbd_set_ed_uuid(mdev, val);
        }

        mdev->ldev->md.uuid[idx] = val;
        drbd_md_mark_dirty(mdev);
}
void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
        if (mdev->ldev->md.uuid[idx]) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
        }
        _drbd_uuid_set(mdev, idx, val);
}
/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:       DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
        u64 val;

        dev_info(DEV, "Creating new current UUID\n");
        D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
        mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

        get_random_bytes(&val, sizeof(u64));
        _drbd_uuid_set(mdev, UI_CURRENT, val);
}
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
        if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
                return;

        if (val == 0) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
                mdev->ldev->md.uuid[UI_BITMAP] = 0;
        } else {
                if (mdev->ldev->md.uuid[UI_BITMAP])
                        dev_warn(DEV, "bm UUID already set");

                mdev->ldev->md.uuid[UI_BITMAP] = val;
                mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
        }
        drbd_md_mark_dirty(mdev);
}
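
/*
 * Illustrative sketch (not part of the driver): taken together, the UUID
 * helpers above behave like a small shift register - the current UUID
 * moves into the bitmap slot, and older values age through the history
 * slots.  A hypothetical miniature of the combined rotation:
 */
#if 0
#include <stdint.h>

enum { CUR, BITMAP, HIST0, HIST1, NSLOTS };

static void demo_rotate_uuids(uint64_t uuid[NSLOTS], uint64_t fresh)
{
        uuid[HIST1]  = uuid[HIST0];     /* age the history */
        uuid[HIST0]  = uuid[BITMAP];
        uuid[BITMAP] = uuid[CUR];       /* old current marks the resync base */
        uuid[CUR]    = fresh;
}
#endif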
/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:       DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
        int rv = -EIO;

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_md_set_flag(mdev, MDF_FULL_SYNC);
                drbd_md_sync(mdev);
                drbd_bm_set_all(mdev);

                rv = drbd_bm_write(mdev);

                if (!rv) {
                        drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
                        drbd_md_sync(mdev);
                }

                put_ldev(mdev);
        }

        return rv;
}
/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:       DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
        int rv = -EIO;

        drbd_resume_al(mdev);
        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_bm_clear_all(mdev);
                rv = drbd_bm_write(mdev);
                put_ldev(mdev);
        }

        return rv;
}
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        struct bm_io_work *work = container_of(w, struct bm_io_work, w);
        int rv;

        D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

        drbd_bm_lock(mdev, work->why);
        rv = work->io_fn(mdev);
        drbd_bm_unlock(mdev);

        clear_bit(BITMAP_IO, &mdev->flags);
        wake_up(&mdev->misc_wait);

        if (work->done)
                work->done(mdev, rv);

        clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
        work->why = NULL;

        return 1;
}
/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:       DRBD device.
 * @io_fn:      IO callback to be called when bitmap IO is possible
 * @done:       callback to be called after the bitmap IO was performed
 * @why:        Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO thus we ensure
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
                          int (*io_fn)(struct drbd_conf *),
                          void (*done)(struct drbd_conf *, int),
                          char *why)
{
        D_ASSERT(current == mdev->worker.task);

        D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
        D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
        D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
        if (mdev->bm_io_work.why)
                dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
                        why, mdev->bm_io_work.why);

        mdev->bm_io_work.io_fn = io_fn;
        mdev->bm_io_work.done = done;
        mdev->bm_io_work.why = why;

        set_bit(BITMAP_IO, &mdev->flags);
        if (atomic_read(&mdev->ap_bio_cnt) == 0) {
                if (list_empty(&mdev->bm_io_work.w.list)) {
                        set_bit(BITMAP_IO_QUEUED, &mdev->flags);
                        drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
                } else
                        dev_err(DEV, "FIXME avoided double queuing bm_io_work\n");
        }
}
/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:       DRBD device.
 * @io_fn:      IO callback to be called when bitmap IO is possible
 * @why:        Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
{
        int rv;

        D_ASSERT(current != mdev->worker.task);

        drbd_suspend_io(mdev);

        drbd_bm_lock(mdev, why);
        rv = io_fn(mdev);
        drbd_bm_unlock(mdev);

        drbd_resume_io(mdev);

        return rv;
}
void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
        if ((mdev->ldev->md.flags & flag) != flag) {
                drbd_md_mark_dirty(mdev);
                mdev->ldev->md.flags |= flag;
        }
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
        if ((mdev->ldev->md.flags & flag) != 0) {
                drbd_md_mark_dirty(mdev);
                mdev->ldev->md.flags &= ~flag;
        }
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
        return (bdev->md.flags & flag) != 0;
}
static void md_sync_timer_fn(unsigned long data)
{
        struct drbd_conf *mdev = (struct drbd_conf *) data;

        drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
        drbd_md_sync(mdev);

        return 1;
}
#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
        unsigned long state;
        unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
        unsigned long refresh;

        if (!rsp->count--) {
                get_random_bytes(&refresh, sizeof(refresh));
                rsp->state += refresh;
                rsp->count = FAULT_RANDOM_REFRESH;
        }
        rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
        return swahw32(rsp->state);
}
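
/*
 * Illustrative sketch (userspace, not part of the driver): the same linear
 * congruential step, runnable on its own.  Each call computes
 * state = state * MULT + ADD (mod 2^BITS_PER_LONG); the prime constants
 * and the occasional get_random_bytes() reseed above keep the sequence
 * from settling into a short cycle:
 */
#if 0
#include <stdio.h>

#define DEMO_MULT 39916801UL    /* prime */
#define DEMO_ADD 479001701UL    /* prime */

static unsigned long demo_state = 1;

static unsigned long demo_rand(void)
{
        demo_state = demo_state * DEMO_MULT + DEMO_ADD;
        return demo_state;
}

int main(void)
{
        int i;

        for (i = 0; i < 4; i++)
                printf("%lu\n", demo_rand());
        return 0;
}
#endif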
static char *
_drbd_fault_str(unsigned int type) {
        static char *_faults[] = {
                [DRBD_FAULT_MD_WR] = "Meta-data write",
                [DRBD_FAULT_MD_RD] = "Meta-data read",
                [DRBD_FAULT_RS_WR] = "Resync write",
                [DRBD_FAULT_RS_RD] = "Resync read",
                [DRBD_FAULT_DT_WR] = "Data write",
                [DRBD_FAULT_DT_RD] = "Data read",
                [DRBD_FAULT_DT_RA] = "Data read ahead",
                [DRBD_FAULT_BM_ALLOC] = "BM allocation",
                [DRBD_FAULT_AL_EE] = "EE allocation",
                [DRBD_FAULT_RECEIVE] = "receive data corruption",
        };

        return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}
unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
        static struct fault_random_state rrs = {0, 0};

        unsigned int ret = (
                (fault_devs == 0 ||
                        ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
                (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

        if (ret) {
                fault_count++;

                if (__ratelimit(&drbd_ratelimit_state))
                        dev_warn(DEV, "***Simulating %s failure\n",
                                _drbd_fault_str(type));
        }

        return ret;
}
#endif
const char *drbd_buildtag(void)
{
        /* DRBD built from external sources has here a reference to the
           git hash of the source code. */

        static char buildtag[38] = "\0uilt-in";

        if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
                if (THIS_MODULE != NULL)
                        sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
                else
#endif
                        buildtag[0] = 'b';
        }

        return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);