/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
              "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
                 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif
/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
bool disable_sendpage;
bool allow_oos;
int proc_details;       /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;
struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;       /* peer requests */
struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;
/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
         member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
int drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;
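/* Illustrative sketch (not part of the driver logic, see
 * drbd_create_mempools()/drbd_destroy_mempools() below for the real
 * uses): pushing and popping a page on this hand-rolled stack goes
 * through the page's private field, roughly:
 *
 *      set_page_private(page, (unsigned long)drbd_pp_pool);
 *      drbd_pp_pool = page;                                    // push
 *
 *      page = drbd_pp_pool;
 *      drbd_pp_pool = (struct page *)page_private(page);       // pop
 *
 * drbd_pp_lock protects both operations.
 */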
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
        .owner   = THIS_MODULE,
        .open    = drbd_open,
        .release = drbd_release,
};
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
        struct bio *bio;

        if (!drbd_md_io_bio_set)
                return bio_alloc(gfp_mask, 1);

        bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
        if (!bio)
                return NULL;
        return bio;
}
#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
        int io_allowed;

        atomic_inc(&device->local_cnt);
        io_allowed = (device->state.disk >= mins);
        if (!io_allowed) {
                if (atomic_dec_and_test(&device->local_cnt))
                        wake_up(&device->misc_wait);
        }
        return io_allowed;
}

#endif
/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection: DRBD connection.
 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
 * @set_size:   Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
                unsigned int set_size)
{
        struct drbd_request *r;
        struct drbd_request *req = NULL;
        int expect_epoch = 0;
        int expect_size = 0;

        spin_lock_irq(&connection->resource->req_lock);

        /* find oldest not yet barrier-acked write request,
         * count writes in its epoch. */
        list_for_each_entry(r, &connection->transfer_log, tl_requests) {
                const unsigned s = r->rq_state;
                if (!req) {
                        if (!(s & RQ_WRITE))
                                continue;
                        if (!(s & RQ_NET_MASK))
                                continue;
                        if (s & RQ_NET_DONE)
                                continue;
                        req = r;
                        expect_epoch = req->epoch;
                        expect_size++;
                } else {
                        if (r->epoch != expect_epoch)
                                break;
                        if (!(s & RQ_WRITE))
                                continue;
                        /* if (s & RQ_DONE): not expected */
                        /* if (!(s & RQ_NET_MASK)): not expected */
                        expect_size++;
                }
        }

        /* first some paranoia code */
        if (req == NULL) {
                drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
                         barrier_nr);
                goto bail;
        }
        if (expect_epoch != barrier_nr) {
                drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
                         barrier_nr, expect_epoch);
                goto bail;
        }

        if (expect_size != set_size) {
                drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
                         barrier_nr, set_size, expect_size);
                goto bail;
        }

        /* Clean up list of requests processed during current epoch. */
        /* this extra list walk restart is paranoia,
         * to catch requests being barrier-acked "unexpectedly".
         * It usually should find the same req again, or some READ preceding it. */
        list_for_each_entry(req, &connection->transfer_log, tl_requests)
                if (req->epoch == expect_epoch)
                        break;
        list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
                if (req->epoch != expect_epoch)
                        break;
                _req_mod(req, BARRIER_ACKED);
        }
        spin_unlock_irq(&connection->resource->req_lock);

        return;

bail:
        spin_unlock_irq(&connection->resource->req_lock);
        conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
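/* Worked example for the checks above (illustrative): if the oldest
 * not-yet-acked epoch is #7 and contains three writes, only a
 * P_BARRIER_ACK with barrier_nr == 7 and set_size == 3 is accepted;
 * any mismatch means sender and receiver disagree about the transfer
 * log, and the connection is torn down with C_PROTOCOL_ERROR. */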
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection: DRBD connection.
 * @what:       The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
        struct drbd_request *req, *r;

        list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
                _req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
        spin_lock_irq(&connection->resource->req_lock);
        _tl_restart(connection, what);
        spin_unlock_irq(&connection->resource->req_lock);
}
/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection: DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
        tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:     DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
        struct drbd_connection *connection = first_peer_device(device)->connection;
        struct drbd_request *req, *r;

        spin_lock_irq(&connection->resource->req_lock);
        list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
                if (!(req->rq_state & RQ_LOCAL_PENDING))
                        continue;
                if (req->w.device != device)
                        continue;
                _req_mod(req, ABORT_DISK_IO);
        }
        spin_unlock_irq(&connection->resource->req_lock);
}
static int drbd_thread_setup(void *arg)
{
        struct drbd_thread *thi = (struct drbd_thread *) arg;
        struct drbd_connection *connection = thi->connection;
        unsigned long flags;
        int retval;

        snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
                 thi->name[0],
                 thi->connection->resource->name);

restart:
        retval = thi->function(thi);

        spin_lock_irqsave(&thi->t_lock, flags);

        /* if the receiver has been "EXITING", the last thing it did
         * was set the conn state to "StandAlone",
         * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
         * and receiver thread will be "started".
         * drbd_thread_start needs to set "RESTARTING" in that case.
         * t_state check and assignment needs to be within the same spinlock,
         * so either thread_start sees EXITING, and can remap to RESTARTING,
         * or thread_start sees NONE, and can proceed as normal.
         */

        if (thi->t_state == RESTARTING) {
                drbd_info(connection, "Restarting %s thread\n", thi->name);
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                goto restart;
        }

        thi->task = NULL;
        thi->t_state = NONE;
        smp_mb();
        complete_all(&thi->stop);
        spin_unlock_irqrestore(&thi->t_lock, flags);

        drbd_info(connection, "Terminating %s\n", current->comm);

        /* Release mod reference taken when thread was started */

        kref_put(&connection->kref, drbd_destroy_connection);
        module_put(THIS_MODULE);
        return retval;
}
static void drbd_thread_init(struct drbd_connection *connection, struct drbd_thread *thi,
                             int (*func) (struct drbd_thread *), char *name)
{
        spin_lock_init(&thi->t_lock);
        thi->task    = NULL;
        thi->t_state = NONE;
        thi->function = func;
        thi->connection = connection;
        strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}
int drbd_thread_start(struct drbd_thread *thi)
{
        struct drbd_connection *connection = thi->connection;
        struct task_struct *nt;
        unsigned long flags;

        /* is used from state engine doing drbd_thread_stop_nowait,
         * while holding the req lock irqsave */
        spin_lock_irqsave(&thi->t_lock, flags);

        switch (thi->t_state) {
        case NONE:
                drbd_info(connection, "Starting %s thread (from %s [%d])\n",
                          thi->name, current->comm, current->pid);

                /* Get ref on module for thread - this is released when thread exits */
                if (!try_module_get(THIS_MODULE)) {
                        drbd_err(connection, "Failed to get module reference in drbd_thread_start\n");
                        spin_unlock_irqrestore(&thi->t_lock, flags);
                        return false;
                }

                kref_get(&thi->connection->kref);

                init_completion(&thi->stop);
                thi->reset_cpu_mask = 1;
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

                nt = kthread_create(drbd_thread_setup, (void *) thi,
                                    "drbd_%c_%s", thi->name[0], thi->connection->resource->name);

                if (IS_ERR(nt)) {
                        drbd_err(connection, "Couldn't start thread\n");

                        kref_put(&connection->kref, drbd_destroy_connection);
                        module_put(THIS_MODULE);
                        return false;
                }
                spin_lock_irqsave(&thi->t_lock, flags);
                thi->task = nt;
                thi->t_state = RUNNING;
                spin_unlock_irqrestore(&thi->t_lock, flags);
                wake_up_process(nt);
                break;
        case EXITING:
                thi->t_state = RESTARTING;
                drbd_info(connection, "Restarting %s thread (from %s [%d])\n",
                          thi->name, current->comm, current->pid);
                /* fall through */
        case RUNNING:
        case RESTARTING:
        default:
                spin_unlock_irqrestore(&thi->t_lock, flags);
                break;
        }

        return true;
}
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
        unsigned long flags;

        enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

        /* may be called from state engine, holding the req lock irqsave */
        spin_lock_irqsave(&thi->t_lock, flags);

        if (thi->t_state == NONE) {
                spin_unlock_irqrestore(&thi->t_lock, flags);
                if (restart)
                        drbd_thread_start(thi);
                return;
        }

        if (thi->t_state != ns) {
                if (thi->task == NULL) {
                        spin_unlock_irqrestore(&thi->t_lock, flags);
                        return;
                }

                thi->t_state = ns;
                smp_mb();
                init_completion(&thi->stop);
                if (thi->task != current)
                        force_sig(DRBD_SIGKILL, thi->task);
        }

        spin_unlock_irqrestore(&thi->t_lock, flags);

        if (wait)
                wait_for_completion(&thi->stop);
}
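/* Thread state machine summary (descriptive only): NONE -> RUNNING via
 * drbd_thread_start(); RUNNING -> EXITING or RESTARTING via
 * _drbd_thread_stop(); drbd_thread_setup() re-invokes thi->function()
 * on RESTARTING and settles back to NONE when EXITING. */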
static struct drbd_thread *drbd_task_to_thread(struct drbd_connection *connection, struct task_struct *task)
{
        struct drbd_thread *thi =
                task == connection->receiver.task ? &connection->receiver :
                task == connection->asender.task  ? &connection->asender :
                task == connection->worker.task   ? &connection->worker : NULL;

        return thi;
}

char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task)
{
        struct drbd_thread *thi = drbd_task_to_thread(connection, task);
        return thi ? thi->name : task->comm;
}
int conn_lowest_minor(struct drbd_connection *connection)
{
        struct drbd_peer_device *peer_device;
        int vnr = 0, minor = -1;

        rcu_read_lock();
        peer_device = idr_get_next(&connection->peer_devices, &vnr);
        if (peer_device)
                minor = device_to_minor(peer_device->device);
        rcu_read_unlock();

        return minor;
}
#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
        unsigned int *resources_per_cpu, min_index = ~0;

        resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
        if (resources_per_cpu) {
                struct drbd_resource *resource;
                unsigned int cpu, min = ~0;

                rcu_read_lock();
                for_each_resource_rcu(resource, &drbd_resources) {
                        for_each_cpu(cpu, resource->cpu_mask)
                                resources_per_cpu[cpu]++;
                }
                rcu_read_unlock();
                for_each_online_cpu(cpu) {
                        if (resources_per_cpu[cpu] < min) {
                                min = resources_per_cpu[cpu];
                                min_index = cpu;
                        }
                }
                kfree(resources_per_cpu);
        }
        if (min_index == ~0) {
                cpumask_setall(*cpu_mask);
                return;
        }
        cpumask_set_cpu(min_index, *cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:        drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
        struct task_struct *p = current;

        if (!thi->reset_cpu_mask)
                return;
        thi->reset_cpu_mask = 0;
        set_cpus_allowed_ptr(p, thi->connection->resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif
/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
        if (connection->agreed_pro_version >= 100) {
                BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
                return sizeof(struct p_header100);
        } else {
                BUILD_BUG_ON(sizeof(struct p_header80) !=
                             sizeof(struct p_header95));
                BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
                return sizeof(struct p_header80);
        }
}
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
        h->magic   = cpu_to_be32(DRBD_MAGIC);
        h->command = cpu_to_be16(cmd);
        h->length  = cpu_to_be16(size);
        return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
        h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
        h->command = cpu_to_be16(cmd);
        h->length  = cpu_to_be32(size);
        return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
                                      int size, int vnr)
{
        h->magic = cpu_to_be32(DRBD_MAGIC_100);
        h->volume = cpu_to_be16(vnr);
        h->command = cpu_to_be16(cmd);
        h->length = cpu_to_be32(size);
        return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
                                   void *buffer, enum drbd_packet cmd, int size)
{
        if (connection->agreed_pro_version >= 100)
                return prepare_header100(buffer, cmd, size, vnr);
        else if (connection->agreed_pro_version >= 95 &&
                 size > DRBD_MAX_SIZE_H80_PACKET)
                return prepare_header95(buffer, cmd, size);
        else
                return prepare_header80(buffer, cmd, size);
}
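/* Example of the header selection above (illustrative sizes): a peer
 * at agreed_pro_version 96 receives a 40 KiB data packet with the
 * p_header95 layout, because 40 KiB exceeds DRBD_MAX_SIZE_H80_PACKET
 * and would not fit the 16-bit length field of p_header80; a small
 * control packet to the same peer still uses the classic p_header80. */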
static void *__conn_prepare_command(struct drbd_connection *connection,
                                    struct drbd_socket *sock)
{
        if (!sock->socket)
                return NULL;
        return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
        void *p;

        mutex_lock(&sock->mutex);
        p = __conn_prepare_command(connection, sock);
        if (!p)
                mutex_unlock(&sock->mutex);

        return p;
}

void *drbd_prepare_command(struct drbd_device *device, struct drbd_socket *sock)
{
        return conn_prepare_command(first_peer_device(device)->connection, sock);
}
static int __send_command(struct drbd_connection *connection, int vnr,
                          struct drbd_socket *sock, enum drbd_packet cmd,
                          unsigned int header_size, void *data,
                          unsigned int size)
{
        int msg_flags;
        int err;

        /*
         * Called with @data == NULL and the size of the data blocks in @size
         * for commands that send data blocks. For those commands, omit the
         * MSG_MORE flag: this will increase the likelihood that data blocks
         * which are page aligned on the sender will end up page aligned on the
         * receiver.
         */
        msg_flags = data ? MSG_MORE : 0;

        header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
                                      header_size + size);
        err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
                            msg_flags);
        if (data && !err)
                err = drbd_send_all(connection, sock->socket, data, size, 0);
        return err;
}
static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
                               enum drbd_packet cmd, unsigned int header_size,
                               void *data, unsigned int size)
{
        return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
                      enum drbd_packet cmd, unsigned int header_size,
                      void *data, unsigned int size)
{
        int err;

        err = __conn_send_command(connection, sock, cmd, header_size, data, size);
        mutex_unlock(&sock->mutex);
        return err;
}

int drbd_send_command(struct drbd_device *device, struct drbd_socket *sock,
                      enum drbd_packet cmd, unsigned int header_size,
                      void *data, unsigned int size)
{
        int err;

        err = __send_command(first_peer_device(device)->connection, device->vnr, sock, cmd, header_size,
                             data, size);
        mutex_unlock(&sock->mutex);
        return err;
}
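/* Usage note (descriptive): every *_send_command() above expects that
 * the matching *_prepare_command() succeeded first - prepare returns
 * with sock->mutex held and a pointer just past the header inside the
 * preallocated send buffer, and send fills in the header, transmits,
 * and releases the mutex. */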
int drbd_send_ping(struct drbd_connection *connection)
{
        struct drbd_socket *sock;

        sock = &connection->meta;
        if (!conn_prepare_command(connection, sock))
                return -EIO;
        return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
        struct drbd_socket *sock;

        sock = &connection->meta;
        if (!conn_prepare_command(connection, sock))
                return -EIO;
        return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}
int drbd_send_sync_param(struct drbd_device *device)
{
        struct drbd_socket *sock;
        struct p_rs_param_95 *p;
        int size;
        const int apv = first_peer_device(device)->connection->agreed_pro_version;
        enum drbd_packet cmd;
        struct net_conf *nc;
        struct disk_conf *dc;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;

        rcu_read_lock();
        nc = rcu_dereference(first_peer_device(device)->connection->net_conf);

        size = apv <= 87 ? sizeof(struct p_rs_param)
                : apv == 88 ? sizeof(struct p_rs_param)
                        + strlen(nc->verify_alg) + 1
                : apv <= 94 ? sizeof(struct p_rs_param_89)
                : /* apv >= 95 */ sizeof(struct p_rs_param_95);

        cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

        /* initialize verify_alg and csums_alg */
        memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

        if (get_ldev(device)) {
                dc = rcu_dereference(device->ldev->disk_conf);
                p->resync_rate = cpu_to_be32(dc->resync_rate);
                p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
                p->c_delay_target = cpu_to_be32(dc->c_delay_target);
                p->c_fill_target = cpu_to_be32(dc->c_fill_target);
                p->c_max_rate = cpu_to_be32(dc->c_max_rate);
                put_ldev(device);
        } else {
                p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
                p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
                p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
                p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
                p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
        }

        if (apv >= 88)
                strcpy(p->verify_alg, nc->verify_alg);
        if (apv >= 89)
                strcpy(p->csums_alg, nc->csums_alg);
        rcu_read_unlock();

        return drbd_send_command(device, sock, cmd, size, NULL, 0);
}
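/* Example for the apv-dependent sizing above (illustrative): with
 * agreed_pro_version 88 and verify_alg "crc32c", size is
 * sizeof(struct p_rs_param) + strlen("crc32c") + 1, i.e. the algorithm
 * name is sent as a trailing NUL-terminated string. */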
int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
        struct drbd_socket *sock;
        struct p_protocol *p;
        struct net_conf *nc;
        int size, cf;

        sock = &connection->data;
        p = __conn_prepare_command(connection, sock);
        if (!p)
                return -EIO;

        rcu_read_lock();
        nc = rcu_dereference(connection->net_conf);

        if (nc->tentative && connection->agreed_pro_version < 92) {
                rcu_read_unlock();
                mutex_unlock(&sock->mutex);
                drbd_err(connection, "--dry-run is not supported by peer");
                return -EOPNOTSUPP;
        }

        size = sizeof(*p);
        if (connection->agreed_pro_version >= 87)
                size += strlen(nc->integrity_alg) + 1;

        p->protocol      = cpu_to_be32(nc->wire_protocol);
        p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
        p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
        p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
        p->two_primaries = cpu_to_be32(nc->two_primaries);
        cf = 0;
        if (nc->discard_my_data)
                cf |= CF_DISCARD_MY_DATA;
        if (nc->tentative)
                cf |= CF_DRY_RUN;
        p->conn_flags    = cpu_to_be32(cf);

        if (connection->agreed_pro_version >= 87)
                strcpy(p->integrity_alg, nc->integrity_alg);
        rcu_read_unlock();

        return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}
int drbd_send_protocol(struct drbd_connection *connection)
{
        int err;

        mutex_lock(&connection->data.mutex);
        err = __drbd_send_protocol(connection, P_PROTOCOL);
        mutex_unlock(&connection->data.mutex);

        return err;
}
static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
{
        struct drbd_socket *sock;
        struct p_uuids *p;
        int i;

        if (!get_ldev_if_state(device, D_NEGOTIATING))
                return 0;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p) {
                put_ldev(device);
                return -EIO;
        }
        spin_lock_irq(&device->ldev->md.uuid_lock);
        for (i = UI_CURRENT; i < UI_SIZE; i++)
                p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
        spin_unlock_irq(&device->ldev->md.uuid_lock);

        device->comm_bm_set = drbd_bm_total_weight(device);
        p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
        rcu_read_lock();
        uuid_flags |= rcu_dereference(first_peer_device(device)->connection->net_conf)->discard_my_data ? 1 : 0;
        rcu_read_unlock();
        uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
        uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
        p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

        put_ldev(device);
        return drbd_send_command(device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_device *device)
{
        return _drbd_send_uuids(device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_device *device)
{
        return _drbd_send_uuids(device, 8);
}
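/* The uuid_flags bits assembled above, for reference: 1 = peer should
 * discard its data, 2 = we were a crashed primary, 4 = our disk is
 * becoming D_INCONSISTENT; drbd_send_uuids_skip_initial_sync() passes
 * 8 to tell the peer to skip the initial sync. */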
void drbd_print_uuids(struct drbd_device *device, const char *text)
{
        if (get_ldev_if_state(device, D_NEGOTIATING)) {
                u64 *uuid = device->ldev->md.uuid;
                drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
                          text,
                          (unsigned long long)uuid[UI_CURRENT],
                          (unsigned long long)uuid[UI_BITMAP],
                          (unsigned long long)uuid[UI_HISTORY_START],
                          (unsigned long long)uuid[UI_HISTORY_END]);
                put_ldev(device);
        } else {
                drbd_info(device, "%s effective data uuid: %016llX\n",
                          text,
                          (unsigned long long)device->ed_uuid);
        }
}
void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
{
        struct drbd_socket *sock;
        struct p_rs_uuid *p;
        u64 uuid;

        D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

        uuid = device->ldev->md.uuid[UI_BITMAP];
        if (uuid && uuid != UUID_JUST_CREATED)
                uuid = uuid + UUID_NEW_BM_OFFSET;
        else
                get_random_bytes(&uuid, sizeof(u64));
        drbd_uuid_set(device, UI_BITMAP, uuid);
        drbd_print_uuids(device, "updated sync UUID");
        drbd_md_sync(device);

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (p) {
                p->uuid = cpu_to_be64(uuid);
                drbd_send_command(device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
        }
}
int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flags flags)
{
        struct drbd_socket *sock;
        struct p_sizes *p;
        sector_t d_size, u_size;
        int q_order_type;
        unsigned int max_bio_size;

        if (get_ldev_if_state(device, D_NEGOTIATING)) {
                D_ASSERT(device, device->ldev->backing_bdev);
                d_size = drbd_get_max_capacity(device->ldev);
                rcu_read_lock();
                u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
                rcu_read_unlock();
                q_order_type = drbd_queue_order_type(device);
                max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
                max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
                put_ldev(device);
        } else {
                d_size = 0;
                u_size = 0;
                q_order_type = QUEUE_ORDERED_NONE;
                max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
        }

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;

        if (first_peer_device(device)->connection->agreed_pro_version <= 94)
                max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
        else if (first_peer_device(device)->connection->agreed_pro_version < 100)
                max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

        p->d_size = cpu_to_be64(d_size);
        p->u_size = cpu_to_be64(u_size);
        p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
        p->max_bio_size = cpu_to_be32(max_bio_size);
        p->queue_order_type = cpu_to_be16(q_order_type);
        p->dds_flags = cpu_to_be16(flags);
        return drbd_send_command(device, sock, P_SIZES, sizeof(*p), NULL, 0);
}
/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @device:     DRBD device.
 */
int drbd_send_current_state(struct drbd_device *device)
{
        struct drbd_socket *sock;
        struct p_state *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->state = cpu_to_be32(device->state.i); /* Within the send mutex */
        return drbd_send_command(device, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @device:     DRBD device.
 * @state:      the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_device *device, union drbd_state state)
{
        struct drbd_socket *sock;
        struct p_state *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->state = cpu_to_be32(state.i); /* Within the send mutex */
        return drbd_send_command(device, sock, P_STATE, sizeof(*p), NULL, 0);
}
int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union drbd_state val)
{
        struct drbd_socket *sock;
        struct p_req_state *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->mask = cpu_to_be32(mask.i);
        p->val = cpu_to_be32(val.i);
        return drbd_send_command(device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}
int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
        enum drbd_packet cmd;
        struct drbd_socket *sock;
        struct p_req_state *p;

        cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
        sock = &connection->data;
        p = conn_prepare_command(connection, sock);
        if (!p)
                return -EIO;
        p->mask = cpu_to_be32(mask.i);
        p->val = cpu_to_be32(val.i);
        return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
{
        struct drbd_socket *sock;
        struct p_req_state_reply *p;

        sock = &first_peer_device(device)->connection->meta;
        p = drbd_prepare_command(device, sock);
        if (p) {
                p->retcode = cpu_to_be32(retcode);
                drbd_send_command(device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
        }
}
void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
        struct drbd_socket *sock;
        struct p_req_state_reply *p;
        enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

        sock = &connection->meta;
        p = conn_prepare_command(connection, sock);
        if (p) {
                p->retcode = cpu_to_be32(retcode);
                conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
        }
}
static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
        BUG_ON(code & ~0xf);
        p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
        p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
        BUG_ON(n & ~0x7);
        p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
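/* Layout of the encoding byte manipulated by the helpers above:
 * bit 7 = value of the first run (dcbp_set_start), bits 6..4 = number
 * of pad bits at the end of the bit stream (dcbp_set_pad_bits),
 * bits 3..0 = the drbd_bitmap_code (dcbp_set_code). */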
static int fill_bitmap_rle_bits(struct drbd_device *device,
                                struct p_compressed_bm *p,
                                unsigned int size,
                                struct bm_xfer_ctx *c)
{
        struct bitstream bs;
        unsigned long plain_bits;
        unsigned long tmp;
        unsigned long rl;
        unsigned len;
        unsigned toggle;
        int bits, use_rle;

        /* may we use this feature? */
        rcu_read_lock();
        use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
        rcu_read_unlock();
        if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
                return 0;

        if (c->bit_offset >= c->bm_bits)
                return 0; /* nothing to do. */

        /* use at most thus many bytes */
        bitstream_init(&bs, p->code, size, 0);
        memset(p->code, 0, size);
        /* plain bits covered in this code string */
        plain_bits = 0;

        /* p->encoding & 0x80 stores whether the first run length is set.
         * bit offset is implicit.
         * start with toggle == 2 to be able to tell the first iteration */
        toggle = 2;

        /* see how much plain bits we can stuff into one packet
         * using RLE and VLI. */
        do {
                tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
                                    : _drbd_bm_find_next(device, c->bit_offset);
                if (tmp == -1UL)
                        tmp = c->bm_bits;
                rl = tmp - c->bit_offset;

                if (toggle == 2) { /* first iteration */
                        if (rl == 0) {
                                /* the first checked bit was set,
                                 * store start value, */
                                dcbp_set_start(p, 1);
                                /* but skip encoding of zero run length */
                                toggle = !toggle;
                                continue;
                        }
                        dcbp_set_start(p, 0);
                }

                /* paranoia: catch zero runlength.
                 * can only happen if bitmap is modified while we scan it. */
                if (rl == 0) {
                        drbd_err(device, "unexpected zero runlength while encoding bitmap "
                                 "t:%u bo:%lu\n", toggle, c->bit_offset);
                        return -1;
                }

                bits = vli_encode_bits(&bs, rl);
                if (bits == -ENOBUFS) /* buffer full */
                        break;
                if (bits <= 0) {
                        drbd_err(device, "error while encoding bitmap: %d\n", bits);
                        return 0;
                }

                toggle = !toggle;
                plain_bits += rl;
                c->bit_offset = tmp;
        } while (c->bit_offset < c->bm_bits);

        len = bs.cur.b - p->code + !!bs.cur.bit;

        if (plain_bits < (len << 3)) {
                /* incompressible with this method.
                 * we need to rewind both word and bit position. */
                c->bit_offset -= plain_bits;
                bm_xfer_ctx_bit_to_word_offset(c);
                c->bit_offset = c->word_offset * BITS_PER_LONG;
                return 0;
        }

        /* RLE + VLI was able to compress it just fine.
         * update c->word_offset. */
        bm_xfer_ctx_bit_to_word_offset(c);

        /* store pad_bits */
        dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

        return len;
}
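/* Example for the compressibility check above (illustrative): if the
 * run-length code string needs 100 bytes (800 bits) to cover 32768
 * plain bitmap bits, plain_bits < (len << 3) is false and the
 * compressed form is kept; a code string at least as large as the
 * plain bits it covers is thrown away and the offsets are rewound. */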
/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
        struct drbd_socket *sock = &first_peer_device(device)->connection->data;
        unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
        struct p_compressed_bm *p = sock->sbuf + header_size;
        int len, err;

        len = fill_bitmap_rle_bits(device, p,
                        DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
        if (len < 0)
                return -EIO;

        if (len) {
                dcbp_set_code(p, RLE_VLI_Bits);
                err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
                                     P_COMPRESSED_BITMAP, sizeof(*p) + len,
                                     NULL, 0);
                c->packets[0]++;
                c->bytes[0] += header_size + sizeof(*p) + len;

                if (c->bit_offset >= c->bm_bits)
                        len = 0; /* DONE */
        } else {
                /* was not compressible.
                 * send a buffer full of plain text bits instead. */
                unsigned int data_size;
                unsigned long num_words;
                unsigned long *p = sock->sbuf + header_size;

                data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
                num_words = min_t(size_t, data_size / sizeof(*p),
                                  c->bm_words - c->word_offset);
                len = num_words * sizeof(*p);
                if (len)
                        drbd_bm_get_lel(device, c->word_offset, num_words, p);
                err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
                c->word_offset += num_words;
                c->bit_offset = c->word_offset * BITS_PER_LONG;

                c->packets[1]++;
                c->bytes[1] += header_size + len;

                if (c->bit_offset > c->bm_bits)
                        c->bit_offset = c->bm_bits;
        }
        if (!err) {
                if (len == 0) {
                        INFO_bm_xfer_stats(device, "send", c);
                        return 0;
                } else
                        return 1;
        }
        return -EIO;
}
/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device)
{
        struct bm_xfer_ctx c;
        int err;

        if (!expect(device->bitmap))
                return false;

        if (get_ldev(device)) {
                if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
                        drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
                        drbd_bm_set_all(device);
                        if (drbd_bm_write(device)) {
                                /* write_bm did fail! Leave full sync flag set in Meta P_DATA
                                 * but otherwise process as per normal - need to tell other
                                 * side that a full resync is required! */
                                drbd_err(device, "Failed to write bitmap to disk!\n");
                        } else {
                                drbd_md_clear_flag(device, MDF_FULL_SYNC);
                                drbd_md_sync(device);
                        }
                }
                put_ldev(device);
        }

        c = (struct bm_xfer_ctx) {
                .bm_bits = drbd_bm_bits(device),
                .bm_words = drbd_bm_words(device),
        };

        do {
                err = send_bitmap_rle_or_plain(device, &c);
        } while (err > 0);

        return err == 0;
}
int drbd_send_bitmap(struct drbd_device *device)
{
        struct drbd_socket *sock = &first_peer_device(device)->connection->data;
        int err = -1;

        mutex_lock(&sock->mutex);
        if (sock->socket)
                err = !_drbd_send_bitmap(device);
        mutex_unlock(&sock->mutex);
        return err;
}
void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
        struct drbd_socket *sock;
        struct p_barrier_ack *p;

        if (connection->cstate < C_WF_REPORT_PARAMS)
                return;

        sock = &connection->meta;
        p = conn_prepare_command(connection, sock);
        if (!p)
                return;
        p->barrier = barrier_nr;
        p->set_size = cpu_to_be32(set_size);
        conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
 * _drbd_send_ack() - Sends an ack packet
 * @device:     DRBD device.
 * @cmd:        Packet command code.
 * @sector:     sector, needs to be in big endian byte order
 * @blksize:    size in byte, needs to be in big endian byte order
 * @block_id:   Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
                          u64 sector, u32 blksize, u64 block_id)
{
        struct drbd_socket *sock;
        struct p_block_ack *p;

        if (device->state.conn < C_CONNECTED)
                return -EIO;

        sock = &first_peer_device(device)->connection->meta;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->sector = sector;
        p->block_id = block_id;
        p->blksize = blksize;
        p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
        return drbd_send_command(device, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd,
                      struct p_data *dp, int data_size)
{
        if (first_peer_device(device)->connection->peer_integrity_tfm)
                data_size -= crypto_hash_digestsize(first_peer_device(device)->connection->peer_integrity_tfm);
        _drbd_send_ack(device, cmd, dp->sector, cpu_to_be32(data_size),
                       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_device *device, enum drbd_packet cmd,
                      struct p_block_req *rp)
{
        _drbd_send_ack(device, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
 * drbd_send_ack() - Sends an ack packet
 * @device:     DRBD device
 * @cmd:        packet command code
 * @peer_req:   peer request
 */
int drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
                  struct drbd_peer_request *peer_req)
{
        return _drbd_send_ack(device, cmd,
                              cpu_to_be64(peer_req->i.sector),
                              cpu_to_be32(peer_req->i.size),
                              peer_req->block_id);
}
/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_device *device, enum drbd_packet cmd,
                     sector_t sector, int blksize, u64 block_id)
{
        return _drbd_send_ack(device, cmd,
                              cpu_to_be64(sector),
                              cpu_to_be32(blksize),
                              cpu_to_be64(block_id));
}
int drbd_send_drequest(struct drbd_device *device, int cmd,
                       sector_t sector, int size, u64 block_id)
{
        struct drbd_socket *sock;
        struct p_block_req *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->sector = cpu_to_be64(sector);
        p->block_id = block_id;
        p->blksize = cpu_to_be32(size);
        return drbd_send_command(device, sock, cmd, sizeof(*p), NULL, 0);
}
int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector, int size,
                            void *digest, int digest_size, enum drbd_packet cmd)
{
        struct drbd_socket *sock;
        struct p_block_req *p;

        /* FIXME: Put the digest into the preallocated socket buffer. */

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->sector = cpu_to_be64(sector);
        p->block_id = ID_SYNCER /* unused */;
        p->blksize = cpu_to_be32(size);
        return drbd_send_command(device, sock, cmd, sizeof(*p),
                                 digest, digest_size);
}
int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size)
{
        struct drbd_socket *sock;
        struct p_block_req *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->sector = cpu_to_be64(sector);
        p->block_id = ID_SYNCER /* unused */;
        p->blksize = cpu_to_be32(size);
        return drbd_send_command(device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
        int drop_it;
        /* long elapsed = (long)(jiffies - device->last_received); */

        drop_it =   connection->meta.socket == sock
                || !connection->asender.task
                || get_t_state(&connection->asender) != RUNNING
                || connection->cstate < C_WF_REPORT_PARAMS;

        if (drop_it)
                return true;

        drop_it = !--connection->ko_count;
        if (!drop_it) {
                drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
                         current->comm, current->pid, connection->ko_count);
                request_ping(connection);
        }

        return drop_it; /* && (device->state == R_PRIMARY) */;
}
static void drbd_update_congested(struct drbd_connection *connection)
{
        struct sock *sk = connection->data.socket->sk;
        if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
                set_bit(NET_CONGESTED, &connection->flags);
}
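/* Example (illustrative): with sk_sndbuf == 128 KiB, NET_CONGESTED is
 * set as soon as more than 4/5 of that, roughly 102 KiB, of unsent
 * data sits in the data socket's send queue. */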
/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
                              int offset, size_t size, unsigned msg_flags)
{
        struct socket *socket;
        void *addr;
        int err;

        socket = first_peer_device(device)->connection->data.socket;
        addr = kmap(page) + offset;
        err = drbd_send_all(first_peer_device(device)->connection, socket, addr, size, msg_flags);
        kunmap(page);
        if (!err)
                device->send_cnt += size >> 9;
        return err;
}
static int _drbd_send_page(struct drbd_device *device, struct page *page,
                           int offset, size_t size, unsigned msg_flags)
{
        struct socket *socket = first_peer_device(device)->connection->data.socket;
        mm_segment_t oldfs = get_fs();
        int len = size;
        int err = -EIO;

        /* e.g. XFS meta- & log-data is in slab pages, which have a
         * page_count of 0 and/or have PageSlab() set.
         * we cannot use send_page for those, as that does get_page();
         * put_page(); and would cause either a VM_BUG directly, or
         * __page_cache_release a page that would actually still be referenced
         * by someone, leading to some obscure delayed Oops somewhere else. */
        if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
                return _drbd_no_send_page(device, page, offset, size, msg_flags);

        msg_flags |= MSG_NOSIGNAL;
        drbd_update_congested(first_peer_device(device)->connection);
        set_fs(KERNEL_DS);
        do {
                int sent;

                sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
                if (sent <= 0) {
                        if (sent == -EAGAIN) {
                                if (we_should_drop_the_connection(first_peer_device(device)->connection, socket))
                                        break;
                                continue;
                        }
                        drbd_warn(device, "%s: size=%d len=%d sent=%d\n",
                                  __func__, (int)size, len, sent);
                        if (sent < 0)
                                err = sent;
                        break;
                }
                len    -= sent;
                offset += sent;
        } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
        set_fs(oldfs);
        clear_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags);

        if (len == 0) {
                err = 0;
                device->send_cnt += size >> 9;
        }
        return err;
}
static int _drbd_send_bio(struct drbd_device *device, struct bio *bio)
{
        struct bio_vec bvec;
        struct bvec_iter iter;

        /* hint all but last page with MSG_MORE */
        bio_for_each_segment(bvec, bio, iter) {
                int err;

                err = _drbd_no_send_page(device, bvec.bv_page,
                                         bvec.bv_offset, bvec.bv_len,
                                         bio_iter_last(bvec, iter)
                                         ? 0 : MSG_MORE);
                if (err)
                        return err;
        }
        return 0;
}
*device
, struct bio
*bio
)
1569 struct bio_vec bvec
;
1570 struct bvec_iter iter
;
1572 /* hint all but last page with MSG_MORE */
1573 bio_for_each_segment(bvec
, bio
, iter
) {
1576 err
= _drbd_send_page(device
, bvec
.bv_page
,
1577 bvec
.bv_offset
, bvec
.bv_len
,
1578 bio_iter_last(bvec
, iter
) ? 0 : MSG_MORE
);
1585 static int _drbd_send_zc_ee(struct drbd_device
*device
,
1586 struct drbd_peer_request
*peer_req
)
1588 struct page
*page
= peer_req
->pages
;
1589 unsigned len
= peer_req
->i
.size
;
1592 /* hint all but last page with MSG_MORE */
1593 page_chain_for_each(page
) {
1594 unsigned l
= min_t(unsigned, len
, PAGE_SIZE
);
1596 err
= _drbd_send_page(device
, page
, 0, l
,
1597 page_chain_next(page
) ? MSG_MORE
: 0);
1605 static u32
bio_flags_to_wire(struct drbd_device
*device
, unsigned long bi_rw
)
1607 if (first_peer_device(device
)->connection
->agreed_pro_version
>= 95)
1608 return (bi_rw
& REQ_SYNC
? DP_RW_SYNC
: 0) |
1609 (bi_rw
& REQ_FUA
? DP_FUA
: 0) |
1610 (bi_rw
& REQ_FLUSH
? DP_FLUSH
: 0) |
1611 (bi_rw
& REQ_DISCARD
? DP_DISCARD
: 0);
1613 return bi_rw
& REQ_SYNC
? DP_RW_SYNC
: 0;
/* Used to send write requests
 * R_PRIMARY -> Peer    (P_DATA)
 */
int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
{
        struct drbd_socket *sock;
        struct p_data *p;
        unsigned int dp_flags = 0;
        int dgs;
        int err;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        dgs = first_peer_device(device)->connection->integrity_tfm ?
              crypto_hash_digestsize(first_peer_device(device)->connection->integrity_tfm) : 0;

        if (!p)
                return -EIO;
        p->sector = cpu_to_be64(req->i.sector);
        p->block_id = (unsigned long)req;
        p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
        dp_flags = bio_flags_to_wire(device, req->master_bio->bi_rw);
        if (device->state.conn >= C_SYNC_SOURCE &&
            device->state.conn <= C_PAUSED_SYNC_T)
                dp_flags |= DP_MAY_SET_IN_SYNC;
        if (first_peer_device(device)->connection->agreed_pro_version >= 100) {
                if (req->rq_state & RQ_EXP_RECEIVE_ACK)
                        dp_flags |= DP_SEND_RECEIVE_ACK;
                if (req->rq_state & RQ_EXP_WRITE_ACK)
                        dp_flags |= DP_SEND_WRITE_ACK;
        }
        p->dp_flags = cpu_to_be32(dp_flags);
        if (dgs)
                drbd_csum_bio(first_peer_device(device)->connection->integrity_tfm, req->master_bio, p + 1);
        err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
        if (!err) {
                /* For protocol A, we have to memcpy the payload into
                 * socket buffers, as we may complete right away
                 * as soon as we handed it over to tcp, at which point the data
                 * pages may become invalid.
                 *
                 * For data-integrity enabled, we copy it as well, so we can be
                 * sure that even if the bio pages may still be modified, it
                 * won't change the data on the wire, thus if the digest checks
                 * out ok after sending on this side, but does not fit on the
                 * receiving side, we sure have detected corruption elsewhere.
                 */
                if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
                        err = _drbd_send_bio(device, req->master_bio);
                else
                        err = _drbd_send_zc_bio(device, req->master_bio);

                /* double check digest, sometimes buffers have been modified in flight. */
                if (dgs > 0 && dgs <= 64) {
                        /* 64 byte, 512 bit, is the largest digest size
                         * currently supported in kernel crypto. */
                        unsigned char digest[64];
                        drbd_csum_bio(first_peer_device(device)->connection->integrity_tfm, req->master_bio, digest);
                        if (memcmp(p + 1, digest, dgs)) {
                                drbd_warn(device,
                                        "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
                                        (unsigned long long)req->i.sector, req->i.size);
                        }
                } /* else if (dgs > 64) {
                     ... Be noisy about digest too large ...
                } */
        }
        mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

        return err;
}
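/* Path selection in drbd_send_dblock(), summarized: protocol A
 * (neither RQ_EXP_RECEIVE_ACK nor RQ_EXP_WRITE_ACK set) and any
 * configuration with an integrity digest copy the payload through
 * _drbd_send_bio(); only protocol B/C without a digest may take the
 * zero-copy _drbd_send_zc_bio() path. */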
/* answer packet, used to send data back for read requests:
 *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET       (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
                    struct drbd_peer_request *peer_req)
{
        struct drbd_socket *sock;
        struct p_data *p;
        int err;
        int dgs;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);

        dgs = first_peer_device(device)->connection->integrity_tfm ?
              crypto_hash_digestsize(first_peer_device(device)->connection->integrity_tfm) : 0;

        if (!p)
                return -EIO;
        p->sector = cpu_to_be64(peer_req->i.sector);
        p->block_id = peer_req->block_id;
        p->seq_num = 0;  /* unused */
        if (dgs)
                drbd_csum_ee(first_peer_device(device)->connection->integrity_tfm, peer_req, p + 1);
        err = __send_command(first_peer_device(device)->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
        if (!err)
                err = _drbd_send_zc_ee(device, peer_req);
        mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

        return err;
}
int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req)
{
        struct drbd_socket *sock;
        struct p_block_desc *p;

        sock = &first_peer_device(device)->connection->data;
        p = drbd_prepare_command(device, sock);
        if (!p)
                return -EIO;
        p->sector = cpu_to_be64(req->i.sector);
        p->blksize = cpu_to_be32(req->i.size);
        return drbd_send_command(device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

                    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
                                              and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_connection *connection, struct socket *sock,
              void *buf, size_t size, unsigned msg_flags)
{
        struct kvec iov;
        struct msghdr msg;
        int rv, sent = 0;

        if (!sock)
                return -EBADR;

        /* THINK  if (signal_pending) return ... ? */

        iov.iov_base = buf;
        iov.iov_len  = size;

        msg.msg_name       = NULL;
        msg.msg_namelen    = 0;
        msg.msg_control    = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

        if (sock == connection->data.socket) {
                rcu_read_lock();
                connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
                rcu_read_unlock();
                drbd_update_congested(connection);
        }
        do {
                /* STRANGE
                 * tcp_sendmsg does _not_ use its size parameter at all ?
                 *
                 * -EAGAIN on timeout, -EINTR on signal.
                 */
/* THINK
 * do we need to block DRBD_SIG if sock == &meta.socket ??
 * otherwise wake_asender() might interrupt some send_*Ack !
 */
                rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
                if (rv == -EAGAIN) {
                        if (we_should_drop_the_connection(connection, sock))
                                break;
                        else
                                continue;
                }
                if (rv == -EINTR) {
                        flush_signals(current);
                        rv = 0;
                }
                if (rv < 0)
                        break;
                sent += rv;
                iov.iov_base += rv;
                iov.iov_len  -= rv;
        } while (sent < size);

        if (sock == connection->data.socket)
                clear_bit(NET_CONGESTED, &connection->flags);

        if (rv <= 0) {
                if (rv != -EAGAIN) {
                        drbd_err(connection, "%s_sendmsg returned %d\n",
                                 sock == connection->meta.socket ? "msock" : "sock",
                                 rv);
                        conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
                } else
                        conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
        }

        return sent;
}
/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
                  size_t size, unsigned msg_flags)
{
        int err;

        err = drbd_send(connection, sock, buffer, size, msg_flags);
        if (err < 0)
                return err;
        if (err != size)
                return -EIO;
        return 0;
}
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
        struct drbd_device *device = bdev->bd_disk->private_data;
        unsigned long flags;
        int rv = 0;

        mutex_lock(&drbd_main_mutex);
        spin_lock_irqsave(&device->resource->req_lock, flags);
        /* to have a stable device->state.role
         * and no race with updating open_cnt */

        if (device->state.role != R_PRIMARY) {
                if (mode & FMODE_WRITE)
                        rv = -EROFS;
                else if (!allow_oos)
                        rv = -EMEDIUMTYPE;
        }

        if (!rv)
                device->open_cnt++;
        spin_unlock_irqrestore(&device->resource->req_lock, flags);
        mutex_unlock(&drbd_main_mutex);

        return rv;
}
static void drbd_release(struct gendisk *gd, fmode_t mode)
{
        struct drbd_device *device = gd->private_data;
        mutex_lock(&drbd_main_mutex);
        device->open_cnt--;
        mutex_unlock(&drbd_main_mutex);
}
static void drbd_set_defaults(struct drbd_device *device)
{
        /* Beware! The actual layout differs
         * between big endian and little endian */
        device->state = (union drbd_dev_state) {
                { .role = R_SECONDARY,
                  .peer = R_UNKNOWN,
                  .conn = C_STANDALONE,
                  .disk = D_DISKLESS,
                  .pdsk = D_UNKNOWN,
                } };
}
void drbd_init_set_defaults(struct drbd_device *device)
{
        /* the memset(,0,) did most of this.
         * note: only assignments, no allocation in here */

        drbd_set_defaults(device);

        atomic_set(&device->ap_bio_cnt, 0);
        atomic_set(&device->ap_pending_cnt, 0);
        atomic_set(&device->rs_pending_cnt, 0);
        atomic_set(&device->unacked_cnt, 0);
        atomic_set(&device->local_cnt, 0);
        atomic_set(&device->pp_in_use_by_net, 0);
        atomic_set(&device->rs_sect_in, 0);
        atomic_set(&device->rs_sect_ev, 0);
        atomic_set(&device->ap_in_flight, 0);
        atomic_set(&device->md_io_in_use, 0);

        mutex_init(&device->own_state_mutex);
        device->state_mutex = &device->own_state_mutex;

        spin_lock_init(&device->al_lock);
        spin_lock_init(&device->peer_seq_lock);

        INIT_LIST_HEAD(&device->active_ee);
        INIT_LIST_HEAD(&device->sync_ee);
        INIT_LIST_HEAD(&device->done_ee);
        INIT_LIST_HEAD(&device->read_ee);
        INIT_LIST_HEAD(&device->net_ee);
        INIT_LIST_HEAD(&device->resync_reads);
        INIT_LIST_HEAD(&device->resync_work.list);
        INIT_LIST_HEAD(&device->unplug_work.list);
        INIT_LIST_HEAD(&device->go_diskless.list);
        INIT_LIST_HEAD(&device->md_sync_work.list);
        INIT_LIST_HEAD(&device->start_resync_work.list);
        INIT_LIST_HEAD(&device->bm_io_work.w.list);

        device->resync_work.cb  = w_resync_timer;
        device->unplug_work.cb  = w_send_write_hint;
        device->go_diskless.cb  = w_go_diskless;
        device->md_sync_work.cb = w_md_sync;
        device->bm_io_work.w.cb = w_bitmap_io;
        device->start_resync_work.cb = w_start_resync;

        device->resync_work.device  = device;
        device->unplug_work.device  = device;
        device->go_diskless.device  = device;
        device->md_sync_work.device = device;
        device->bm_io_work.w.device = device;
        device->start_resync_work.device = device;

        init_timer(&device->resync_timer);
        init_timer(&device->md_sync_timer);
        init_timer(&device->start_resync_timer);
        init_timer(&device->request_timer);
        device->resync_timer.function = resync_timer_fn;
        device->resync_timer.data = (unsigned long) device;
        device->md_sync_timer.function = md_sync_timer_fn;
        device->md_sync_timer.data = (unsigned long) device;
        device->start_resync_timer.function = start_resync_timer_fn;
        device->start_resync_timer.data = (unsigned long) device;
        device->request_timer.function = request_timer_fn;
        device->request_timer.data = (unsigned long) device;

        init_waitqueue_head(&device->misc_wait);
        init_waitqueue_head(&device->state_wait);
        init_waitqueue_head(&device->ee_wait);
        init_waitqueue_head(&device->al_wait);
        init_waitqueue_head(&device->seq_wait);

        device->resync_wenr = LC_FREE;
        device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
        device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}
void drbd_device_cleanup(struct drbd_device *device)
{
        int i;
        if (first_peer_device(device)->connection->receiver.t_state != NONE)
                drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
                         first_peer_device(device)->connection->receiver.t_state);

        device->al_writ_cnt  =
        device->bm_writ_cnt  =
        device->read_cnt     =
        device->recv_cnt     =
        device->send_cnt     =
        device->writ_cnt     =
        device->p_size       =
        device->rs_start     =
        device->rs_total     =
        device->rs_failed    = 0;
        device->rs_last_events = 0;
        device->rs_last_sect_ev = 0;
        for (i = 0; i < DRBD_SYNC_MARKS; i++) {
                device->rs_mark_left[i] = 0;
                device->rs_mark_time[i] = 0;
        }
        D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);

        drbd_set_my_capacity(device, 0);
        if (device->bitmap) {
                /* maybe never allocated. */
                drbd_bm_resize(device, 0, 1);
                drbd_bm_cleanup(device);
        }

        drbd_free_bc(device->ldev);
        device->ldev = NULL;

        clear_bit(AL_SUSPENDED, &device->flags);

        D_ASSERT(device, list_empty(&device->active_ee));
        D_ASSERT(device, list_empty(&device->sync_ee));
        D_ASSERT(device, list_empty(&device->done_ee));
        D_ASSERT(device, list_empty(&device->read_ee));
        D_ASSERT(device, list_empty(&device->net_ee));
        D_ASSERT(device, list_empty(&device->resync_reads));
        D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
        D_ASSERT(device, list_empty(&device->resync_work.list));
        D_ASSERT(device, list_empty(&device->unplug_work.list));
        D_ASSERT(device, list_empty(&device->go_diskless.list));

        drbd_set_defaults(device);
}
static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_md_io_bio_set)
		bioset_free(drbd_md_io_bio_set);
	if (drbd_md_io_page_pool)
		mempool_destroy(drbd_md_io_page_pool);
	if (drbd_ee_mempool)
		mempool_destroy(drbd_ee_mempool);
	if (drbd_request_mempool)
		mempool_destroy(drbd_request_mempool);
	if (drbd_ee_cache)
		kmem_cache_destroy(drbd_ee_cache);
	if (drbd_request_cache)
		kmem_cache_destroy(drbd_request_cache);
	if (drbd_bm_ext_cache)
		kmem_cache_destroy(drbd_bm_ext_cache);
	if (drbd_al_ext_cache)
		kmem_cache_destroy(drbd_al_ext_cache);

	drbd_md_io_bio_set   = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
}
static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
	int i;

	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_bio_set == NULL)
		goto Enomem;

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_page_pool == NULL)
		goto Enomem;

	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}
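
/* Page pool sizing, for reference: "number" above is
 * (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count.  With the usual
 * DRBD_MAX_BIO_SIZE of 1 MiB and 4 KiB pages that is 256 pages
 * pre-allocated per configured minor, i.e. about 1 MiB of page-pool
 * memory per device; the exact figures depend on the build's
 * PAGE_SIZE and DRBD_MAX_BIO_SIZE. */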
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
			   void *unused)
{
	/* just so we have it.  you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};

static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
	int rr;

	rr = drbd_free_peer_reqs(device, &device->active_ee);
	if (rr)
		drbd_err(device, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->sync_ee);
	if (rr)
		drbd_err(device, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->read_ee);
	if (rr)
		drbd_err(device, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->done_ee);
	if (rr)
		drbd_err(device, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->net_ee);
	if (rr)
		drbd_err(device, "%d EEs in net list found!\n", rr);
}

/* caution. no locking. */
void drbd_destroy_device(struct kref *kref)
{
	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;

	del_timer_sync(&device->request_timer);

	/* paranoia asserts */
	D_ASSERT(device, device->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (device->this_bdev)
		bdput(device->this_bdev);

	drbd_free_bc(device->ldev);
	device->ldev = NULL;

	drbd_release_all_peer_reqs(device);

	lc_destroy(device->act_log);
	lc_destroy(device->resync);

	kfree(device->p_uuid);
	/* device->p_uuid = NULL; */

	if (device->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(device);
	__free_page(device->md_io_page);
	put_disk(device->vdisk);
	blk_cleanup_queue(device->rq_queue);
	kfree(device->rs_plan_s);
	kfree(first_peer_device(device));
	kfree(device);

	for_each_connection(connection, resource)
		kref_put(&connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}
/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;

static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_device *device = req->w.device;
		struct bio *bio = req->master_bio;
		unsigned long start_time = req->start_time;
		bool expected;

		expected =
			expect(atomic_read(&req->completion_ref) == 0) &&
			expect(req->rq_state & RQ_POSTPONED) &&
			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
				(req->rq_state & RQ_LOCAL_ABORTED) != 0);

		if (!expected)
			drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
				req, atomic_read(&req->completion_ref),
				req->rq_state);

		/* We still need to put one kref associated with the
		 * "completion_ref" going zero in the code path that queued it
		 * here.  The request object may still be referenced by a
		 * frozen local req->private_bio, in case we force-detached.
		 */
		kref_put(&req->kref, drbd_req_destroy);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyways, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing generic_make_request(),
		 * as we want to keep the start_time information. */
		inc_ap_bio(device);
		__drbd_make_request(device, bio, start_time);
	}
}

void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->w.device);

	queue_work(retry.wq, &retry.worker);
}
void drbd_destroy_resource(struct kref *kref)
{
	struct drbd_resource *resource =
		container_of(kref, struct drbd_resource, kref);

	idr_destroy(&resource->devices);
	free_cpumask_var(resource->cpu_mask);
	kfree(resource->name);
	kfree(resource);
}

void drbd_free_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection, *tmp;

	for_each_connection_safe(connection, tmp, resource) {
		list_del(&connection->connections);
		kref_put(&connection->kref, drbd_destroy_connection);
	}
	kref_put(&resource->kref, drbd_destroy_resource);
}

static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_device *device;
	struct drbd_resource *resource, *tmp;

	unregister_reboot_notifier(&drbd_notifier);

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&drbd_devices, device, i)
		drbd_delete_device(device);

	/* not _rcu since, no other updater anymore. Genl already unregistered */
	for_each_resource_safe(resource, tmp, &drbd_resources) {
		list_del(&resource->resources);
		drbd_free_resource(resource);
	}

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&drbd_devices);

	printk(KERN_INFO "drbd: module cleanup done.\n");
}
/**
 * drbd_congested() - Callback for the flusher thread
 * @congested_data:	User data
 * @bdi_bits:		Bits the BDI flusher thread is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_device *device = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!may_inc_ap_bio(device)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
		r |= (1 << BDI_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
		 * currently blocked waiting for that usermode helper to
		 * finish. */
		if (!get_ldev_if_state(device, D_UP_TO_DATE))
			r |= (1 << BDI_sync_congested);
		else
			put_ldev(device);
		r &= bdi_bits;
		reason = 'c';
		goto out;
	}

	if (get_ldev(device)) {
		q = bdev_get_queue(device->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(device);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) &&
	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	device->congestion_reason = reason;
	return r;
}
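
/* For reference, a short legend of the congestion_reason characters
 * assigned above (a sketch reconstructed from the code paths, not an
 * authoritative list): 'd' = IO frozen by DRBD, 'c' = blocked on a
 * usermode helper callback, 'b' = backing device congested,
 * 'n' = network send path congested, 'a' = both backing device and
 * network congested, '-' = not congested. */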
static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}

struct drbd_resource *drbd_find_resource(const char *name)
{
	struct drbd_resource *resource;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		if (!strcmp(resource->name, name)) {
			kref_get(&resource->kref);
			goto found;
		}
	}
	resource = NULL;
found:
	rcu_read_unlock();
	return resource;
}

struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					  void *peer_addr, int peer_addr_len)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		for_each_connection_rcu(connection, resource) {
			if (connection->my_addr_len == my_addr_len &&
			    connection->peer_addr_len == peer_addr_len &&
			    !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
			    !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
				kref_get(&connection->kref);
				goto found;
			}
		}
	}
	connection = NULL;
found:
	rcu_read_unlock();
	return connection;
}

static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}
void conn_free_crypto(struct drbd_connection *connection)
{
	drbd_free_sock(connection);

	crypto_free_hash(connection->csums_tfm);
	crypto_free_hash(connection->verify_tfm);
	crypto_free_hash(connection->cram_hmac_tfm);
	crypto_free_hash(connection->integrity_tfm);
	crypto_free_hash(connection->peer_integrity_tfm);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);

	connection->csums_tfm = NULL;
	connection->verify_tfm = NULL;
	connection->cram_hmac_tfm = NULL;
	connection->integrity_tfm = NULL;
	connection->peer_integrity_tfm = NULL;
	connection->int_dig_in = NULL;
	connection->int_dig_vv = NULL;
}

int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
	struct drbd_connection *connection;
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
		/*
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		*/

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	resource->res_opts = *res_opts;
	if (cpumask_empty(new_cpu_mask))
		drbd_calc_cpu_mask(&new_cpu_mask);
	if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
		cpumask_copy(resource->cpu_mask, new_cpu_mask);
		for_each_connection_rcu(connection, resource) {
			connection->receiver.reset_cpu_mask = 1;
			connection->asender.reset_cpu_mask = 1;
			connection->worker.reset_cpu_mask = 1;
		}
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}
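
/* A short worked example for the cpu_mask handling above (illustrative,
 * based on bitmap_parse() semantics): the resource option cpu_mask is a
 * hex string, so cpu_mask "3" pins the resource's threads to CPUs 0-1,
 * and "f0" to CPUs 4-7.  An empty mask leaves the calculated default in
 * place, and on a uniprocessor kernel the mask is silently ignored. */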
struct drbd_resource *drbd_create_resource(const char *name)
{
	struct drbd_resource *resource;

	resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
	if (!resource)
		goto fail;
	resource->name = kstrdup(name, GFP_KERNEL);
	if (!resource->name)
		goto fail_free_resource;
	if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
		goto fail_free_name;
	kref_init(&resource->kref);
	idr_init(&resource->devices);
	INIT_LIST_HEAD(&resource->connections);
	list_add_tail_rcu(&resource->resources, &drbd_resources);
	mutex_init(&resource->conf_update);
	spin_lock_init(&resource->req_lock);
	return resource;

fail_free_name:
	kfree(resource->name);
fail_free_resource:
	kfree(resource);
fail:
	return NULL;
}

/* caller must be under genl_lock() */
struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
	if (!connection)
		return NULL;

	if (drbd_alloc_socket(&connection->data))
		goto fail;
	if (drbd_alloc_socket(&connection->meta))
		goto fail;

	connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!connection->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&connection->transfer_log);

	INIT_LIST_HEAD(&connection->current_epoch->list);
	connection->epochs = 1;
	spin_lock_init(&connection->epoch_lock);
	connection->write_ordering = WO_bdev_flush;

	connection->send.seen_any_write_yet = false;
	connection->send.current_epoch_nr = 0;
	connection->send.current_epoch_writes = 0;

	resource = drbd_create_resource(name);
	if (!resource)
		goto fail;

	connection->cstate = C_STANDALONE;
	mutex_init(&connection->cstate_mutex);
	init_waitqueue_head(&connection->ping_wait);
	idr_init(&connection->peer_devices);

	drbd_init_workqueue(&connection->sender_work);
	mutex_init(&connection->data.mutex);
	mutex_init(&connection->meta.mutex);

	drbd_thread_init(connection, &connection->receiver, drbd_receiver, "receiver");
	drbd_thread_init(connection, &connection->worker, drbd_worker, "worker");
	drbd_thread_init(connection, &connection->asender, drbd_asender, "asender");

	kref_init(&connection->kref);

	connection->resource = resource;

	if (set_resource_options(resource, res_opts))
		goto fail_resource;

	kref_get(&resource->kref);
	list_add_tail_rcu(&connection->connections, &resource->connections);
	return connection;

fail_resource:
	list_del(&resource->resources);
	drbd_free_resource(resource);
fail:
	kfree(connection->current_epoch);
	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection);
	return NULL;
}
void drbd_destroy_connection(struct kref *kref)
{
	struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
	struct drbd_resource *resource = connection->resource;

	if (atomic_read(&connection->current_epoch->epoch_size) != 0)
		drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
	kfree(connection->current_epoch);

	idr_destroy(&connection->peer_devices);

	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);
	kfree(connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}

static int init_submitter(struct drbd_device *device)
{
	/* opencoded create_singlethread_workqueue(),
	 * to be able to say "drbd%d", ..., minor */
	device->submit.wq = alloc_workqueue("drbd%u_submit",
					    WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor);
	if (!device->submit.wq)
		return -ENOMEM;

	INIT_WORK(&device->submit.worker, do_submit);
	spin_lock_init(&device->submit.lock);
	INIT_LIST_HEAD(&device->submit.writes);
	return 0;
}
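
/* Note on init_submitter(): alloc_workqueue() with max_active = 1 and
 * WQ_UNBOUND keeps at most one submit work item in flight, which is the
 * behaviour create_singlethread_workqueue() would give; doing it by hand
 * merely lets the "drbd%u_submit" name carry the minor number.
 * WQ_MEM_RECLAIM guarantees forward progress under memory pressure,
 * which matters because this queue sits in the block IO path. */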
enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr)
{
	struct drbd_connection *connection;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device, *tmp_peer_device;
	struct gendisk *disk;
	struct request_queue *q;
	int id;
	enum drbd_ret_code err = ERR_NOMEM;

	device = minor_to_device(minor);
	if (device)
		return ERR_MINOR_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
	if (!device)
		return ERR_NOMEM;
	kref_init(&device->kref);

	kref_get(&resource->kref);
	device->resource = resource;
	device->minor = minor;
	device->vnr = vnr;

	drbd_init_set_defaults(device);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	device->rq_queue = q;
	q->queuedata = device;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	device->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = device;

	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	device->this_bdev->bd_contains = device->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = device;

	blk_queue_make_request(q, drbd_make_request);
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	/* Setting the max_hw_sectors to an odd value of 8kibyte here
	   This triggers a max_bio_size message upon first attach or connect */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &resource->req_lock;

	device->md_io_page = alloc_page(GFP_KERNEL);
	if (!device->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(device))
		goto out_no_bitmap;
	device->read_requests = RB_ROOT;
	device->write_requests = RB_ROOT;

	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC) {
			err = ERR_MINOR_EXISTS;
			drbd_msg_put_info("requested minor exists already");
		}
		goto out_no_minor_idr;
	}
	kref_get(&device->kref);

	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC) {
			err = ERR_MINOR_EXISTS;
			drbd_msg_put_info("requested minor exists already");
		}
		goto out_idr_remove_minor;
	}
	kref_get(&device->kref);

	INIT_LIST_HEAD(&device->peer_devices);
	for_each_connection(connection, resource) {
		peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
		if (!peer_device)
			goto out_idr_remove_from_resource;
		peer_device->connection = connection;
		peer_device->device = device;

		list_add(&peer_device->peer_devices, &device->peer_devices);
		kref_get(&device->kref);

		id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
		if (id < 0) {
			if (id == -ENOSPC) {
				err = ERR_INVALID_REQUEST;
				drbd_msg_put_info("requested volume exists already");
			}
			goto out_idr_remove_from_resource;
		}
		kref_get(&connection->kref);
	}

	if (init_submitter(device)) {
		err = ERR_NOMEM;
		drbd_msg_put_info("unable to create submit workqueue");
		goto out_idr_remove_vol;
	}

	add_disk(disk);

	/* inherit the connection state */
	device->state.conn = first_connection(resource)->cstate;
	if (device->state.conn == C_WF_REPORT_PARAMS)
		drbd_connected(device);

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
	for_each_connection(connection, resource) {
		peer_device = idr_find(&connection->peer_devices, vnr);
		if (peer_device)
			idr_remove(&connection->peer_devices, vnr);
		kref_put(&connection->kref, drbd_destroy_connection);
	}
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		list_del(&peer_device->peer_devices);
		kfree(peer_device);
	}
	idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
	idr_remove(&drbd_devices, minor);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(device);
out_no_bitmap:
	__free_page(device->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kref_put(&resource->kref, drbd_destroy_resource);
	kfree(device);
	return err;
}
void drbd_delete_device(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;
	int refs = 3;

	for_each_connection(connection, resource) {
		idr_remove(&connection->peer_devices, device->vnr);
		refs++;
	}
	idr_remove(&resource->devices, device->vnr);
	idr_remove(&drbd_devices, device_to_minor(device));
	del_gendisk(device->vdisk);
	synchronize_rcu();
	kref_sub(&device->kref, refs, drbd_destroy_device);
}

int __init drbd_init(void)
{
	int err;

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&drbd_devices);

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_resources);

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	err = drbd_create_mempools();
	if (err)
		goto fail;

	err = -ENOMEM;
	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO, NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc) {
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
		DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}
void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev->disk_conf);
	kfree(ldev);
}

void drbd_free_sock(struct drbd_connection *connection)
{
	if (connection->data.socket) {
		mutex_lock(&connection->data.mutex);
		kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
		sock_release(connection->data.socket);
		connection->data.socket = NULL;
		mutex_unlock(&connection->data.mutex);
	}
	if (connection->meta.socket) {
		mutex_lock(&connection->meta.mutex);
		kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
		sock_release(connection->meta.socket);
		connection->meta.socket = NULL;
		mutex_unlock(&connection->meta.mutex);
	}
}

/* meta data management */

void conn_md_sync(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_md_sync(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
/* aligned 4kByte */
struct meta_data_on_disk {
	u64 la_size_sect;      /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;

	u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
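
/* Layout arithmetic, for reference: the fixed fields above are
 * 7 u64 (la_size_sect, uuid[UI_SIZE] with UI_SIZE == 4, device_uuid,
 * reserved_u64_1) = 56 bytes plus 10 u32 = 40 bytes, 96 bytes total;
 * reserved_u8[4096 - 96] pads the superblock to exactly one 4 KiB
 * block, which the BUILD_BUG_ON() in drbd_md_sync() below enforces. */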
void drbd_md_write(struct drbd_device *device, void *b)
{
	struct meta_data_on_disk *buffer = b;
	sector_t sector;
	int i;

	memset(buffer, 0, sizeof(*buffer));

	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(device->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);

	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);

	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
	sector = device->ldev->md.md_offset;

	if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		drbd_err(device, "meta data update failed!\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
	}
}

/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @device:	DRBD device.
 */
void drbd_md_sync(struct drbd_device *device)
{
	struct meta_data_on_disk *buffer;

	/* Don't accidentally change the DRBD meta data layout. */
	BUILD_BUG_ON(UI_SIZE != 4);
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);

	del_timer(&device->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &device->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(device, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(device);
	if (!buffer)
		goto out;

	drbd_md_write(device, buffer);

	/* Update device->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);

	drbd_md_put_buffer(device);
out:
	put_ldev(device);
}
static int check_activity_log_stripe_size(struct drbd_device *device,
		struct meta_data_on_disk *on_disk,
		struct drbd_md *in_core)
{
	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
	u64 al_size_4k;

	/* both not set: default to old fixed size activity log */
	if (al_stripes == 0 && al_stripe_size_4k == 0) {
		al_stripes = 1;
		al_stripe_size_4k = MD_32kB_SECT/8;
	}

	/* some paranoia plausibility checks */

	/* we need both values to be set */
	if (al_stripes == 0 || al_stripe_size_4k == 0)
		goto err;

	al_size_4k = (u64)al_stripes * al_stripe_size_4k;

	/* Upper limit of activity log area, to avoid potential overflow
	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
	 * than 72 * 4k blocks total only increases the amount of history,
	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
	if (al_size_4k > (16 * 1024 * 1024/4))
		goto err;

	/* Lower limit: we need at least 8 transaction slots (32kB)
	 * to not break existing setups */
	if (al_size_4k < MD_32kB_SECT/8)
		goto err;

	in_core->al_stripe_size_4k = al_stripe_size_4k;
	in_core->al_stripes = al_stripes;
	in_core->al_size_4k = al_size_4k;

	return 0;
err:
	drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
			al_stripes, al_stripe_size_4k);
	return -EINVAL;
}
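
/* Worked example for the "both zero" default above: MD_32kB_SECT is
 * 32 kiB expressed in 512-byte sectors, i.e. 64, so al_stripe_size_4k
 * becomes 64/8 = 8 4k-blocks and al_size_4k = 1 * 8 = 8 -- the classic
 * fixed 32 kiB activity log with its 8 transaction slots, which is also
 * exactly the lower limit enforced a few lines up. */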
static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
	struct drbd_md *in_core = &bdev->md;
	s32 on_disk_al_sect;
	s32 on_disk_bm_sect;

	/* The on-disk size of the activity log, calculated from offsets, and
	 * the size of the activity log calculated from the stripe settings,
	 * should match.
	 * Though we could relax this a bit: it is ok, if the striped activity log
	 * fits in the available on-disk activity log size.
	 * Right now, that would break how resize is implemented.
	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
	 * of possible unused padding space in the on disk layout. */
	if (in_core->al_offset < 0) {
		if (in_core->bm_offset > in_core->al_offset)
			goto err;
		on_disk_al_sect = -in_core->al_offset;
		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
	} else {
		if (in_core->al_offset != MD_4kB_SECT)
			goto err;
		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
			goto err;

		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
	}

	/* old fixed size meta data is exactly that: fixed. */
	if (in_core->meta_dev_idx >= 0) {
		if (in_core->md_size_sect != MD_128MB_SECT
		||  in_core->al_offset != MD_4kB_SECT
		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
		||  in_core->al_stripes != 1
		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
			goto err;
	}

	if (capacity < in_core->md_size_sect)
		goto err;
	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
		goto err;

	/* should be aligned, and at least 32k */
	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
		goto err;

	/* should fit (for now: exactly) into the available on-disk space;
	 * overflow prevention is in check_activity_log_stripe_size() above. */
	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
		goto err;

	/* again, should be aligned */
	if (in_core->bm_offset & 7)
		goto err;

	/* FIXME check for device grow with flex external meta data? */

	/* can the available bitmap space cover the last agreed device size? */
	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
		goto err;

	return 0;

err:
	drbd_err(device, "meta data offsets don't make sense: idx=%d "
			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
			in_core->meta_dev_idx,
			in_core->al_stripes, in_core->al_stripe_size_4k,
			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
			(unsigned long long)in_core->la_size_sect,
			(unsigned long long)capacity);

	return -EINVAL;
}
/**
 * drbd_md_read() - Reads in the meta data super block
 * @device:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return NO_ERROR on success, and an enum drbd_ret_code in case
 * something goes wrong.
 *
 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
 * even before @bdev is assigned to @device->ldev.
 */
int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (device->state.disk != D_DISKLESS)
		return ERR_DISK_CONFIGURED;

	buffer = drbd_md_get_buffer(device);
	if (!buffer)
		return ERR_NOMEM;

	/* First, figure out where our meta data superblock is located,
	 * and read it. */
	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
	bdev->md.md_offset = drbd_md_ss(bdev);

	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		drbd_err(device, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
		/* btw: that's Activity Log clean, not "all" clean. */
		drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}

	rv = ERR_MD_INVALID;
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		goto err;
	}

	/* convert to in_core endian */
	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);

	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
		goto err;
	if (check_offsets_and_sizes(device, bdev))
		goto err;

	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		drbd_err(device, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		goto err;
	}

	rv = NO_ERROR;

	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED) {
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
		device->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&device->resource->req_lock);

 err:
	drbd_md_put_buffer(device);

	return rv;
}
/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @device:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures drbd_md_sync() gets called within five seconds.
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
		mod_timer(&device->md_sync_timer, jiffies + HZ);
		device->last_md_mark_dirty.line = line;
		device->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_device *device)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags))
		mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
#endif
void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}

void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (device->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(device, val);
	}

	device->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(device);
}

void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}

void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (device->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
	}
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @device:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid;

	get_random_bytes(&val, sizeof(u64));

	spin_lock_irq(&device->ldev->md.uuid_lock);
	bm_uuid = device->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
	__drbd_uuid_set(device, UI_CURRENT, val);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	drbd_print_uuids(device, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(device);
}
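
/* UUID rotation in a nutshell (an illustrative walk-through, not extra
 * logic): suppose uuid[UI_CURRENT] is C and uuid[UI_BITMAP] is 0.
 * After drbd_uuid_new_current(), uuid[UI_BITMAP] == C and
 * uuid[UI_CURRENT] is a fresh random value (with its lowest bit forced
 * to reflect the R_PRIMARY role by __drbd_uuid_set()).  When the peer
 * reconnects and still reports C as its current UUID, both sides can
 * tell that this node diverged after C, and the bitmap describes
 * exactly the blocks that need an incremental resync. */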
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
	unsigned long flags;
	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (val == 0) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
		device->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);

	drbd_md_mark_dirty(device);
}
/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_device *device)
{
	int rv = -EIO;

	if (get_ldev_if_state(device, D_ATTACHING)) {
		drbd_md_set_flag(device, MDF_FULL_SYNC);
		drbd_md_sync(device);
		drbd_bm_set_all(device);

		rv = drbd_bm_write(device);

		if (!rv) {
			drbd_md_clear_flag(device, MDF_FULL_SYNC);
			drbd_md_sync(device);
		}

		put_ldev(device);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_device *device)
{
	int rv = -EIO;

	drbd_resume_al(device);
	if (get_ldev_if_state(device, D_ATTACHING)) {
		drbd_bm_clear_all(device);
		rv = drbd_bm_write(device);
		put_ldev(device);
	}

	return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	struct drbd_device *device = w->device;
	int rv = -EIO;

	D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);

	if (get_ldev(device)) {
		drbd_bm_lock(device, work->why, work->flags);
		rv = work->io_fn(device);
		drbd_bm_unlock(device);
		put_ldev(device);
	}

	clear_bit_unlock(BITMAP_IO, &device->flags);
	wake_up(&device->misc_wait);

	if (work->done)
		work->done(device, rv);

	clear_bit(BITMAP_IO_QUEUED, &device->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}
void drbd_ldev_destroy(struct drbd_device *device)
{
	lc_destroy(device->resync);
	device->resync = NULL;
	lc_destroy(device->act_log);
	device->act_log = NULL;
	__no_warn(local,
		drbd_free_bc(device->ldev);
		device->ldev = NULL;);

	clear_bit(GO_DISKLESS, &device->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_device *device = w->device;

	D_ASSERT(device, device->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */

	/* Try to write changed bitmap pages, read errors may have just
	 * set some bits outside the area covered by the activity log.
	 *
	 * If we have an IO error during the bitmap writeout,
	 * we will want a full sync next time, just in case.
	 * (Do we want a specific meta data flag for this?)
	 *
	 * If that does not make it to stable storage either,
	 * we cannot do anything about that anymore.
	 *
	 * We still need to check if both bitmap and ldev are present, we may
	 * end up here after a failed attach, before ldev was even assigned.
	 */
	if (device->bitmap && device->ldev) {
		/* An interrupted resync or similar is allowed to recount bits
		 * while we detach.
		 * Any modifications would not be expected anymore, though.
		 */
		if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
					"detach", BM_LOCKED_TEST_ALLOWED)) {
			if (test_bit(WAS_READ_ERROR, &device->flags)) {
				drbd_md_set_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
	}

	drbd_force_state(device, NS(disk, D_DISKLESS));
	return 0;
}
/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_device *device,
			  int (*io_fn)(struct drbd_device *),
			  void (*done)(struct drbd_device *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
	if (device->bm_io_work.why)
		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, device->bm_io_work.why);

	device->bm_io_work.io_fn = io_fn;
	device->bm_io_work.done = done;
	device->bm_io_work.why = why;
	device->bm_io_work.flags = flags;

	spin_lock_irq(&device->resource->req_lock);
	set_bit(BITMAP_IO, &device->flags);
	if (atomic_read(&device->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
					&device->bm_io_work.w);
	}
	spin_unlock_irq(&device->resource->req_lock);
}
/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(device);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(device);

	return rv;
}
void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags &= ~flag;
	}
}
int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_device *device = (struct drbd_device *) data;

	/* must not double-queue! */
	if (list_empty(&device->md_sync_work.list))
		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
				      &device->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_device *device = w->device;

	drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	drbd_warn(device, "last md_mark_dirty: %s:%u\n",
		device->last_md_mark_dirty.func, device->last_md_mark_dirty.line);
#endif
	drbd_md_sync(device);
	return 0;
}
const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]	        = "Data",
		[P_DATA_REPLY]	        = "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]	        = "Barrier",
		[P_BITMAP]	        = "ReportBitMap",
		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]     = "RSDataRequest",
		[P_SYNC_PARAM]	        = "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]            = "ReportProtocol",
		[P_UUIDS]	        = "ReportUUIDs",
		[P_SIZES]	        = "ReportSizes",
		[P_STATE]	        = "ReportState",
		[P_SYNC_UUID]           = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]      = "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]	        = "PingAck",
		[P_RECV_ACK]	        = "RecvAck",
		[P_WRITE_ACK]	        = "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_SUPERSEDED]          = "Superseded",
		[P_NEG_ACK]	        = "NegAck",
		[P_NEG_DREPLY]	        = "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]	        = "BarrierAck",
		[P_STATE_CHG_REQ]       = "StateChgRequest",
		[P_STATE_CHG_REPLY]     = "StateChgReply",
		[P_OV_REQUEST]          = "OVRequest",
		[P_OV_REPLY]            = "OVReply",
		[P_OV_RESULT]           = "OVResult",
		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]   = "CBitmap",
		[P_DELAY_PROBE]         = "DelayProbe",
		[P_OUT_OF_SYNC]		= "OutOfSync",
		[P_RETRY_WRITE]		= "RetryWrite",
		[P_RS_CANCEL]		= "RSCancel",
		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
		[P_PROTOCOL_UPDATE]	= "protocol_update",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}
/**
 * drbd_wait_misc  -  wait for a request to make progress
 * @device:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up device->misc_wait on progress.  */
	i->waiting = true;
	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&device->resource->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&device->misc_wait, &wait);
	spin_lock_irq(&device->resource->req_lock);
	if (!timeout || device->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
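
/* Timeout arithmetic above, for reference: nc->timeout is configured in
 * tenths of a second, so "nc->timeout * HZ / 10" converts it to jiffies;
 * multiplying by ko_count mirrors the "peer must make progress within
 * timeout * ko-count" rule used elsewhere.  E.g. with the defaults
 * timeout=60 (6 s) and ko-count=7 this waits up to 42 s, and with
 * ko_count == 0 it waits without limit (MAX_SCHEDULE_TIMEOUT). */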
#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701  /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	unsigned long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}
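
/* Why the swahw32() at the end (an interpretation, not from the original
 * comments): a plain LCG modulo a power of two has notoriously weak
 * low-order bits.  Swapping the 16-bit halfwords moves the stronger
 * high-order bits into the positions consumed by the "% 100" in
 * _drbd_insert_fault() below, and every FAULT_RANDOM_REFRESH draws the
 * state is re-seeded from get_random_bytes().  Fine for fault
 * injection; not a source of cryptographic randomness. */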
static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << device_to_minor(device)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			drbd_warn(device, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif
const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	   git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef MODULE
		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
#else
		buildtag[0] = 'b';
#endif
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);