/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
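/* Note on GFP_TRY: it deliberately omits __GFP_WAIT, so an
 * alloc_page(GFP_TRY) can neither sleep nor trigger reclaim/write-out;
 * it either succeeds immediately or fails, and __GFP_NOWARN keeps the
 * expected failures quiet.  Callers do their own waiting and retrying,
 * which matters for the "criss-cross" deadlock avoidance noted below. */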
/*
 * Some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
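/* Illustration (not from the original source): a chain of three pages,
 * linked through page->private and terminated by 0:
 *
 *   head -> [page A] -> [page B] -> [page C] -> 0
 *
 * i.e. page_private(A) == (unsigned long)B, and page_private(C) == 0. */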
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
    struct page *page = *head;
    struct page *tmp;

    if (!page)
        return NULL;

    while (page) {
        tmp = page_chain_next(page);
        if (--n == 0)
            break; /* found sufficient pages */
        if (tmp == NULL)
            /* insufficient pages, don't use any of them. */
            return NULL;
        page = tmp;
    }

    /* add end of list marker for the returned list */
    set_page_private(page, 0);
    /* actual return value, and adjustment of head */
    page = *head;
    *head = tmp;
    return page;
}
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
    struct page *tmp;
    int i = 1;
    while ((tmp = page_chain_next(page)))
        ++i, page = tmp;
    if (len)
        *len = i;
    return page;
}
static int page_chain_free(struct page *page)
{
    struct page *tmp;
    int i = 0;
    page_chain_for_each_safe(page, tmp) {
        put_page(page);
        ++i;
    }
    return i;
}
static void page_chain_add(struct page **head,
        struct page *chain_first, struct page *chain_last)
{
    struct page *tmp;

    /* sanity check: the given tail really is the tail of this chain */
    tmp = page_chain_tail(chain_first, NULL);
    BUG_ON(tmp != chain_last);

    /* add chain to head */
    set_page_private(chain_last, (unsigned long)*head);
    *head = chain_first;
}
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
    struct page *page = NULL;
    struct page *tmp = NULL;
    int i = 0;

    /* Yes, testing drbd_pp_vacant outside the lock is racy.
     * So what. It saves a spin_lock. */
    if (drbd_pp_vacant >= number) {
        spin_lock(&drbd_pp_lock);
        page = page_chain_del(&drbd_pp_pool, number);
        if (page)
            drbd_pp_vacant -= number;
        spin_unlock(&drbd_pp_lock);
        if (page)
            return page;
    }

    /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
     * "criss-cross" setup, that might cause write-out on some other DRBD,
     * which in turn might block on the other node at this very place. */
    for (i = 0; i < number; i++) {
        tmp = alloc_page(GFP_TRY);
        if (!tmp)
            break;
        set_page_private(tmp, (unsigned long)page);
        page = tmp;
    }

    if (i == number)
        return page;

    /* Not enough pages immediately available this time.
     * No need to jump around here, drbd_pp_alloc will retry this
     * function "soon". */
    if (page) {
        tmp = page_chain_tail(page, NULL);
        spin_lock(&drbd_pp_lock);
        page_chain_add(&drbd_pp_pool, page, tmp);
        drbd_pp_vacant += i;
        spin_unlock(&drbd_pp_lock);
    }
    return NULL;
}
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
    struct drbd_epoch_entry *e;
    struct list_head *le, *tle;

    /* The EEs are always appended to the end of the list. Since
       they are sent in order over the wire, they have to finish
       in order. As soon as we see the first unfinished one, we
       can stop examining the list... */

    list_for_each_safe(le, tle, &mdev->net_ee) {
        e = list_entry(le, struct drbd_epoch_entry, w.list);
        if (drbd_ee_has_active_page(e))
            break;
        list_move(le, to_be_freed);
    }
}
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
    LIST_HEAD(reclaimed);
    struct drbd_epoch_entry *e, *t;

    spin_lock_irq(&mdev->req_lock);
    reclaim_net_ee(mdev, &reclaimed);
    spin_unlock_irq(&mdev->req_lock);

    list_for_each_entry_safe(e, t, &reclaimed, w.list)
        drbd_free_net_ee(mdev, e);
}
/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:   DRBD device.
 * @number: number of pages requested
 * @retry:  whether to retry, if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
    struct page *page = NULL;
    DEFINE_WAIT(wait);

    /* Yes, we may run up to @number over max_buffers. If we
     * follow it strictly, the admin will get it wrong anyways. */
    if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
        page = drbd_pp_first_pages_or_try_alloc(mdev, number);

    while (page == NULL) {
        prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

        drbd_kick_lo_and_reclaim_net(mdev);

        if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
            page = drbd_pp_first_pages_or_try_alloc(mdev, number);
            if (page)
                break;
        }

        if (!retry)
            break;

        if (signal_pending(current)) {
            dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
            break;
        }

        schedule();
    }
    finish_wait(&drbd_pp_wait, &wait);

    if (page)
        atomic_add(number, &mdev->pp_in_use);
    return page;
}
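/* Typical pairing (sketch, based on the receive paths below): pages obtained
 * here travel inside an epoch entry, and are eventually returned through
 * drbd_pp_free(), which either re-links them onto the global drbd_pp_pool
 * or hands them back to the system once the pool is full enough. */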
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
    atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
    int i;

    if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
        i = page_chain_free(page);
    else {
        struct page *tmp;
        tmp = page_chain_tail(page, &i);
        spin_lock(&drbd_pp_lock);
        page_chain_add(&drbd_pp_pool, page, tmp);
        drbd_pp_vacant += i;
        spin_unlock(&drbd_pp_lock);
    }
    i = atomic_sub_return(i, a);
    if (i < 0)
        dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
            is_net ? "pp_in_use_by_net" : "pp_in_use", i);
    wake_up(&drbd_pp_wait);
}
/*
 * You need to hold the req_lock:
 *  _drbd_wait_ee_list_empty()
 *
 * You must not have the req_lock:
 *  drbd_process_done_ee()
 *  drbd_wait_ee_list_empty()
 */
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
                     u64 id,
                     sector_t sector,
                     unsigned int data_size,
                     gfp_t gfp_mask) __must_hold(local)
{
    struct drbd_epoch_entry *e;
    struct page *page;
    unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

    if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
        return NULL;

    e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
    if (!e) {
        if (!(gfp_mask & __GFP_NOWARN))
            dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
        return NULL;
    }

    page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
    if (!page)
        goto fail;

    INIT_HLIST_NODE(&e->colision);
    e->epoch = NULL;
    e->mdev = mdev;
    e->pages = page;
    atomic_set(&e->pending_bios, 0);
    e->size = data_size;
    e->flags = 0;
    e->sector = sector;
    e->block_id = id;

    return e;

 fail:
    mempool_free(e, drbd_ee_mempool);
    return NULL;
}
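/* Lifecycle sketch (from the call sites in this file): an EE allocated here
 * is queued on one of mdev->active_ee/sync_ee/read_ee, its bios go out via
 * drbd_submit_ee(), completion moves it to done_ee, and
 * drbd_process_done_ee() runs the w.cb callback (usually sending an ACK)
 * before drbd_free_ee() returns the pages and the EE to their pools. */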
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
    if (e->flags & EE_HAS_DIGEST)
        kfree(e->digest);
    drbd_pp_free(mdev, e->pages, is_net);
    D_ASSERT(atomic_read(&e->pending_bios) == 0);
    D_ASSERT(hlist_unhashed(&e->colision));
    mempool_free(e, drbd_ee_mempool);
}
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
    LIST_HEAD(work_list);
    struct drbd_epoch_entry *e, *t;
    int count = 0;
    int is_net = list == &mdev->net_ee;

    spin_lock_irq(&mdev->req_lock);
    list_splice_init(list, &work_list);
    spin_unlock_irq(&mdev->req_lock);

    list_for_each_entry_safe(e, t, &work_list, w.list) {
        drbd_free_some_ee(mdev, e, is_net);
        count++;
    }
    return count;
}
/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
    LIST_HEAD(work_list);
    LIST_HEAD(reclaimed);
    struct drbd_epoch_entry *e, *t;
    int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

    spin_lock_irq(&mdev->req_lock);
    reclaim_net_ee(mdev, &reclaimed);
    list_splice_init(&mdev->done_ee, &work_list);
    spin_unlock_irq(&mdev->req_lock);

    list_for_each_entry_safe(e, t, &reclaimed, w.list)
        drbd_free_net_ee(mdev, e);

    /* possible callbacks here:
     * e_end_block, and e_end_resync_block, e_send_discard_ack.
     * all ignore the last argument.
     */
    list_for_each_entry_safe(e, t, &work_list, w.list) {
        /* list_del not necessary, next/prev members not touched */
        ok = e->w.cb(mdev, &e->w, !ok) && ok;
        drbd_free_ee(mdev, e);
    }
    wake_up(&mdev->ee_wait);

    return ok;
}
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
    DEFINE_WAIT(wait);

    /* avoids spin_lock/unlock
     * and calling prepare_to_wait in the fast path */
    while (!list_empty(head)) {
        prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
        spin_unlock_irq(&mdev->req_lock);
        io_schedule();
        finish_wait(&mdev->ee_wait, &wait);
        spin_lock_irq(&mdev->req_lock);
    }
}
void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
    spin_lock_irq(&mdev->req_lock);
    _drbd_wait_ee_list_empty(mdev, head);
    spin_unlock_irq(&mdev->req_lock);
}
/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
        struct socket *sock, struct socket **newsock)
{
    struct sock *sk = sock->sk;
    int err = 0;

    *what = "listen";
    err = sock->ops->listen(sock, 5);
    if (err < 0)
        goto out;

    *what = "sock_create_lite";
    err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
                   newsock);
    if (err < 0)
        goto out;

    *what = "accept";
    err = sock->ops->accept(sock, *newsock, 0);
    if (err < 0) {
        sock_release(*newsock);
        *newsock = NULL;
        goto out;
    }
    (*newsock)->ops = sock->ops;

out:
    return err;
}
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
            void *buf, size_t size, int flags)
{
    mm_segment_t oldfs;
    struct kvec iov = {
        .iov_base = buf,
        .iov_len = size,
    };
    struct msghdr msg = {
        .msg_iovlen = 1,
        .msg_iov = (struct iovec *)&iov,
        .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
    };
    int rv;

    oldfs = get_fs();
    set_fs(KERNEL_DS);
    rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
    set_fs(oldfs);

    return rv;
}
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
    mm_segment_t oldfs;
    struct kvec iov = {
        .iov_base = buf,
        .iov_len = size,
    };
    struct msghdr msg = {
        .msg_iovlen = 1,
        .msg_iov = (struct iovec *)&iov,
        .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
    };
    int rv;

    oldfs = get_fs();
    set_fs(KERNEL_DS);

    for (;;) {
        rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
        if (rv == size)
            break;

        /* Note:
         * ECONNRESET   other side closed the connection
         * ERESTARTSYS  (on sock) we got a signal
         */

        if (rv < 0) {
            if (rv == -ECONNRESET)
                dev_info(DEV, "sock was reset by peer\n");
            else if (rv != -ERESTARTSYS)
                dev_err(DEV, "sock_recvmsg returned %d\n", rv);
            break;
        } else if (rv == 0) {
            dev_info(DEV, "sock was shut down by peer\n");
            break;
        } else {
            /* signal came in, or peer/link went down,
             * after we read a partial message
             */
            /* D_ASSERT(signal_pending(current)); */
            break;
        }
    }

    set_fs(oldfs);

    if (rv != size)
        drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

    return rv;
}
/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
        unsigned int rcv)
{
    /* open coded SO_SNDBUF, SO_RCVBUF */
    if (snd) {
        sock->sk->sk_sndbuf = snd;
        sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
    }
    if (rcv) {
        sock->sk->sk_rcvbuf = rcv;
        sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
    }
}
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
    const char *what;
    struct socket *sock;
    struct sockaddr_in6 src_in6;
    int err;
    int disconnect_on_error = 1;

    if (!get_net_conf(mdev))
        return NULL;

    what = "sock_create_kern";
    err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
        SOCK_STREAM, IPPROTO_TCP, &sock);
    if (err < 0) {
        sock = NULL;
        goto out;
    }

    sock->sk->sk_rcvtimeo =
    sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
    drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
            mdev->net_conf->rcvbuf_size);

    /* explicitly bind to the configured IP as source IP
     * for the outgoing connections.
     * This is needed for multihomed hosts and to be
     * able to use lo: interfaces for drbd.
     * Make sure to use 0 as port number, so linux selects
     * a free one dynamically.
     */
    memcpy(&src_in6, mdev->net_conf->my_addr,
           min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
    if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
        src_in6.sin6_port = 0;
    else
        ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

    what = "bind before connect";
    err = sock->ops->bind(sock,
                  (struct sockaddr *) &src_in6,
                  mdev->net_conf->my_addr_len);
    if (err < 0)
        goto out;

    /* connect may fail, peer not yet available.
     * stay C_WF_CONNECTION, don't go Disconnecting! */
    disconnect_on_error = 0;
    what = "connect";
    err = sock->ops->connect(sock,
                 (struct sockaddr *)mdev->net_conf->peer_addr,
                 mdev->net_conf->peer_addr_len, 0);

out:
    if (err < 0) {
        if (sock) {
            sock_release(sock);
            sock = NULL;
        }
        switch (-err) {
            /* timeout, busy, signal pending */
        case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
        case EINTR: case ERESTARTSYS:
            /* peer not (yet) available, network problem */
        case ECONNREFUSED: case ENETUNREACH:
        case EHOSTDOWN: case EHOSTUNREACH:
            disconnect_on_error = 0;
            break;
        default:
            dev_err(DEV, "%s failed, err = %d\n", what, err);
        }
        if (disconnect_on_error)
            drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
    }
    put_net_conf(mdev);
    return sock;
}
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
    int timeo, err;
    struct socket *s_estab = NULL, *s_listen;
    const char *what;

    if (!get_net_conf(mdev))
        return NULL;

    what = "sock_create_kern";
    err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
        SOCK_STREAM, IPPROTO_TCP, &s_listen);
    if (err) {
        s_listen = NULL;
        goto out;
    }

    timeo = mdev->net_conf->try_connect_int * HZ;
    timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

    s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
    s_listen->sk->sk_rcvtimeo = timeo;
    s_listen->sk->sk_sndtimeo = timeo;
    drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
            mdev->net_conf->rcvbuf_size);

    what = "bind before listen";
    err = s_listen->ops->bind(s_listen,
                  (struct sockaddr *) mdev->net_conf->my_addr,
                  mdev->net_conf->my_addr_len);
    if (err < 0)
        goto out;

    err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
    if (s_listen)
        sock_release(s_listen);
    if (err < 0) {
        if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
            dev_err(DEV, "%s failed, err = %d\n", what, err);
            drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
        }
    }
    put_net_conf(mdev);

    return s_estab;
}
static int drbd_send_fp(struct drbd_conf *mdev,
    struct socket *sock, enum drbd_packets cmd)
{
    struct p_header80 *h = &mdev->data.sbuf.header.h80;

    return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}
static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
    struct p_header80 *h = &mdev->data.rbuf.header.h80;
    int rr;

    rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

    if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
        return be16_to_cpu(h->command);

    return 0xffff;
}
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev: DRBD device.
 * @sock: pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
    int rr;
    char tb[4];

    if (!*sock)
        return false;

    /* peek without blocking: pending data, or a clean -EAGAIN,
     * both mean the TCP connection is still alive */
    rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

    if (rr > 0 || rr == -EAGAIN) {
        return true;
    } else {
        sock_release(*sock);
        *sock = NULL;
        return false;
    }
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks a different language,
 *     no point in trying again, please go standalone.
 *  -2 we do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
    struct socket *s, *sock, *msock;
    int try, h, ok;

    D_ASSERT(!mdev->data.socket);

    if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
        return -2;

    clear_bit(DISCARD_CONCURRENT, &mdev->flags);

    sock  = NULL;
    msock = NULL;

    do {
        for (try = 0;;) {
            /* 3 tries, this should take less than a second! */
            s = drbd_try_connect(mdev);
            if (s || ++try >= 3)
                break;
            /* give the other side time to call bind() & listen() */
            schedule_timeout_interruptible(HZ / 10);
        }

        if (s) {
            if (!sock) {
                drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
                sock = s;
                s = NULL;
            } else if (!msock) {
                drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
                msock = s;
                s = NULL;
            } else {
                dev_err(DEV, "Logic error in drbd_connect()\n");
                goto out_release_sockets;
            }
        }

        if (sock && msock) {
            schedule_timeout_interruptible(HZ / 10);
            ok = drbd_socket_okay(mdev, &sock);
            ok = drbd_socket_okay(mdev, &msock) && ok;
            if (ok)
                break;
        }

retry:
        s = drbd_wait_for_connect(mdev);
        if (s) {
            try = drbd_recv_fp(mdev, s);
            drbd_socket_okay(mdev, &sock);
            drbd_socket_okay(mdev, &msock);
            switch (try) {
            case P_HAND_SHAKE_S:
                if (sock) {
                    dev_warn(DEV, "initial packet S crossed\n");
                    sock_release(sock);
                }
                sock = s;
                break;
            case P_HAND_SHAKE_M:
                if (msock) {
                    dev_warn(DEV, "initial packet M crossed\n");
                    sock_release(msock);
                }
                msock = s;
                set_bit(DISCARD_CONCURRENT, &mdev->flags);
                break;
            default:
                dev_warn(DEV, "Error receiving initial packet\n");
                sock_release(s);
                if (random32() & 1)
                    goto retry;
            }
        }

        if (mdev->state.conn <= C_DISCONNECTING)
            goto out_release_sockets;
        if (signal_pending(current)) {
            flush_signals(current);
            if (get_t_state(&mdev->receiver) == Exiting)
                goto out_release_sockets;
        }

        if (sock && msock) {
            ok = drbd_socket_okay(mdev, &sock);
            ok = drbd_socket_okay(mdev, &msock) && ok;
            if (ok)
                break;
        }
    } while (1);

    msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
    sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

    sock->sk->sk_allocation = GFP_NOIO;
    msock->sk->sk_allocation = GFP_NOIO;

    sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
    msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

    /* NOT YET ...
     * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
     * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
     * first set it to the P_HAND_SHAKE timeout,
     * which we set to 4x the configured ping_timeout. */
    sock->sk->sk_sndtimeo =
    sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

    msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
    msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

    /* we don't want delays.
     * we use TCP_CORK where appropriate, though */
    drbd_tcp_nodelay(sock);
    drbd_tcp_nodelay(msock);
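    /* At this point we hold two established TCP connections: "sock" carries
     * the bulk data stream (hence TC_PRIO_INTERACTIVE_BULK above), while
     * "msock" is the low-latency meta-data/ACK channel driven by the asender
     * thread (hence TC_PRIO_INTERACTIVE and the ping-based receive timeout). */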
    mdev->data.socket = sock;
    mdev->meta.socket = msock;
    mdev->last_received = jiffies;

    D_ASSERT(mdev->asender.task == NULL);

    h = drbd_do_handshake(mdev);
    if (h <= 0)
        return h;

    if (mdev->cram_hmac_tfm) {
        /* drbd_request_state(mdev, NS(conn, WFAuth)); */
        switch (drbd_do_auth(mdev)) {
        case -1:
            dev_err(DEV, "Authentication of peer failed\n");
            return -1;
        case 0:
            dev_err(DEV, "Authentication of peer failed, trying again.\n");
            return 0;
        }
    }

    if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
        return 0;

    sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
    sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

    atomic_set(&mdev->packet_seq, 0);

    drbd_thread_start(&mdev->asender);

    if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
        drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
        put_ldev(mdev);
    }

    if (drbd_send_protocol(mdev) == -1)
        return -1;
    drbd_send_sync_param(mdev, &mdev->sync_conf);
    drbd_send_sizes(mdev, 0, 0);
    drbd_send_uuids(mdev);
    drbd_send_state(mdev);
    clear_bit(USE_DEGR_WFC_T, &mdev->flags);
    clear_bit(RESIZE_PENDING, &mdev->flags);

    return 1;

out_release_sockets:
    if (sock)
        sock_release(sock);
    if (msock)
        sock_release(msock);
    return -1;
}
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
    union p_header *h = &mdev->data.rbuf.header;
    int r;

    r = drbd_recv(mdev, h, sizeof(*h));
    if (unlikely(r != sizeof(*h))) {
        if (!signal_pending(current))
            dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
        return false;
    }

    if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
        *cmd = be16_to_cpu(h->h80.command);
        *packet_size = be16_to_cpu(h->h80.length);
    } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
        *cmd = be16_to_cpu(h->h95.command);
        *packet_size = be32_to_cpu(h->h95.length);
    } else {
        dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
            be32_to_cpu(h->h80.magic),
            be16_to_cpu(h->h80.command),
            be16_to_cpu(h->h80.length));
        return false;
    }
    mdev->last_received = jiffies;

    return true;
}
static void drbd_flush(struct drbd_conf *mdev)
{
    int rv;

    if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
        rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
                    NULL);
        if (rv) {
            dev_err(DEV, "local disk flush failed with status %d\n", rv);
            /* would rather check on EOPNOTSUPP, but that is not reliable.
             * don't try again for ANY return value != 0
             * if (rv == -EOPNOTSUPP) */
            drbd_bump_write_ordering(mdev, WO_drain_io);
        }
        put_ldev(mdev);
    }
}
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:   DRBD device.
 * @epoch:  Epoch object.
 * @ev:     Epoch event.
 */
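/* The events applied here (see the switch below): EV_PUT drops an active
 * reference, EV_GOT_BARRIER_NR records that the barrier number arrived,
 * EV_BECAME_LAST re-evaluates a successor epoch once its predecessor is
 * gone, and the EV_CLEANUP modifier suppresses sending P_BARRIER_ACK
 * while tearing down. */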
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
                           struct drbd_epoch *epoch,
                           enum epoch_event ev)
{
    int epoch_size;
    struct drbd_epoch *next_epoch;
    enum finish_epoch rv = FE_STILL_LIVE;

    spin_lock(&mdev->epoch_lock);
    do {
        next_epoch = NULL;

        epoch_size = atomic_read(&epoch->epoch_size);

        switch (ev & ~EV_CLEANUP) {
        case EV_PUT:
            atomic_dec(&epoch->active);
            break;
        case EV_GOT_BARRIER_NR:
            set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
            break;
        case EV_BECAME_LAST:
            /* nothing to do */
            break;
        }

        if (epoch_size != 0 &&
            atomic_read(&epoch->active) == 0 &&
            test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
            if (!(ev & EV_CLEANUP)) {
                spin_unlock(&mdev->epoch_lock);
                drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
                spin_lock(&mdev->epoch_lock);
            }
            dec_unacked(mdev);

            if (mdev->current_epoch != epoch) {
                next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
                list_del(&epoch->list);
                ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
                kfree(epoch);

                if (rv == FE_STILL_LIVE)
                    rv = FE_DESTROYED;
            } else {
                epoch->flags = 0;
                atomic_set(&epoch->epoch_size, 0);
                /* atomic_set(&epoch->active, 0); is already zero */
                if (rv == FE_STILL_LIVE)
                    rv = FE_RECYCLED;
                wake_up(&mdev->ee_wait);
            }
        }

        if (!next_epoch)
            break;

        epoch = next_epoch;
    } while (1);

    spin_unlock(&mdev->epoch_lock);

    return rv;
}
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:   DRBD device.
 * @wo:     Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
    enum write_ordering_e pwo;
    static char *write_ordering_str[] = {
        [WO_none] = "none",
        [WO_drain_io] = "drain",
        [WO_bdev_flush] = "flush",
    };

    pwo = mdev->write_ordering;
    wo = min(pwo, wo);
    if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
        wo = WO_drain_io;
    if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
        wo = WO_none;
    mdev->write_ordering = wo;
    if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
        dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
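/* Sketch of the fallback ladder implied above: WO_bdev_flush (issue a disk
 * flush per epoch) degrades to WO_drain_io (wait for all requests of an
 * epoch to drain), which degrades to WO_none.  We only ever fall back,
 * never upgrade, e.g. when blkdev_issue_flush() fails in drbd_flush(). */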
/**
 * drbd_submit_ee()
 * @mdev:   DRBD device.
 * @e:      epoch entry
 * @rw:     flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
        const unsigned rw, const int fault_type)
{
    struct bio *bios = NULL;
    struct bio *bio;
    struct page *page = e->pages;
    sector_t sector = e->sector;
    unsigned ds = e->size;
    unsigned n_bios = 0;
    unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
    int err = -ENOMEM;

    /* In most cases, we will only need one bio. But in case the lower
     * level restrictions happen to be different at this offset on this
     * side than those of the sending peer, we may need to submit the
     * request in more than one bio. */
next_bio:
    bio = bio_alloc(GFP_NOIO, nr_pages);
    if (!bio) {
        dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
        goto fail;
    }
    /* > e->sector, unless this is the first bio */
    bio->bi_sector = sector;
    bio->bi_bdev = mdev->ldev->backing_bdev;
    bio->bi_rw = rw;
    bio->bi_private = e;
    bio->bi_end_io = drbd_endio_sec;

    bio->bi_next = bios;
    bios = bio;
    ++n_bios;

    page_chain_for_each(page) {
        unsigned len = min_t(unsigned, ds, PAGE_SIZE);
        if (!bio_add_page(bio, page, len, 0)) {
            /* A single page must always be possible!
             * But in case it fails anyways,
             * we deal with it, and complain (below). */
            if (bio->bi_vcnt == 0) {
                dev_err(DEV,
                    "bio_add_page failed for len=%u, "
                    "bi_vcnt=0 (bi_sector=%llu)\n",
                    len, (unsigned long long)bio->bi_sector);
                err = -ENOSPC;
                goto fail;
            }
            goto next_bio;
        }
        ds -= len;
        sector += len >> 9;
        --nr_pages;
    }
    D_ASSERT(page == NULL);
    D_ASSERT(ds == 0);

    atomic_set(&e->pending_bios, n_bios);
    do {
        bio = bios;
        bios = bios->bi_next;
        bio->bi_next = NULL;

        drbd_generic_make_request(mdev, fault_type, bio);
    } while (bios);
    return 0;

fail:
    while (bios) {
        bio = bios;
        bios = bios->bi_next;
        bio_put(bio);
    }
    return err;
}
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
    int rv;
    struct p_barrier *p = &mdev->data.rbuf.barrier;
    struct drbd_epoch *epoch;

    inc_unacked(mdev);

    mdev->current_epoch->barrier_nr = p->barrier;
    rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

    /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
     * the activity log, which means it would not be resynced in case the
     * R_PRIMARY crashes now.
     * Therefore we must send the barrier_ack after the barrier request was
     * completed. */
    switch (mdev->write_ordering) {
    case WO_none:
        if (rv == FE_RECYCLED)
            return true;

        /* receiver context, in the writeout path of the other node.
         * avoid potential distributed deadlock */
        epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
        if (epoch)
            break;
        else
            dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
            /* Fall through */

    case WO_bdev_flush:
    case WO_drain_io:
        drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
        drbd_flush(mdev);

        if (atomic_read(&mdev->current_epoch->epoch_size)) {
            epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
            if (epoch)
                break;
        }

        epoch = mdev->current_epoch;
        wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

        D_ASSERT(atomic_read(&epoch->active) == 0);
        D_ASSERT(epoch->flags == 0);

        return true;
    default:
        dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
        return false;
    }

    epoch->flags = 0;
    atomic_set(&epoch->epoch_size, 0);
    atomic_set(&epoch->active, 0);

    spin_lock(&mdev->epoch_lock);
    if (atomic_read(&mdev->current_epoch->epoch_size)) {
        list_add(&epoch->list, &mdev->current_epoch->list);
        mdev->current_epoch = epoch;
    } else {
        /* The current_epoch got recycled while we allocated this one... */
        kfree(epoch);
    }
    spin_unlock(&mdev->epoch_lock);

    return true;
}
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
    const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
    struct drbd_epoch_entry *e;
    struct page *page;
    int dgs, ds, rr;
    void *dig_in = mdev->int_dig_in;
    void *dig_vv = mdev->int_dig_vv;
    unsigned long *data;

    dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
        crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

    if (dgs) {
        rr = drbd_recv(mdev, dig_in, dgs);
        if (rr != dgs) {
            if (!signal_pending(current))
                dev_warn(DEV,
                    "short read receiving data digest: read %d expected %d\n",
                    rr, dgs);
            return NULL;
        }
    }

    data_size -= dgs;

    ERR_IF(data_size == 0) return NULL;
    ERR_IF(data_size &  0x1ff) return NULL;
    ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;
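    /* The checks above enforce: a non-empty payload, a multiple of 512
     * bytes (data_size & 0x1ff tests the low nine bits), and an upper
     * bound of DRBD_MAX_BIO_SIZE per request. */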
    /* even though we trust our peer,
     * we sometimes have to double check. */
    if (sector + (data_size>>9) > capacity) {
        dev_err(DEV, "request from peer beyond end of local disk: "
            "capacity: %llus < sector: %llus + size: %u\n",
            (unsigned long long)capacity,
            (unsigned long long)sector, data_size);
        return NULL;
    }

    /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
     * "criss-cross" setup, that might cause write-out on some other DRBD,
     * which in turn might block on the other node at this very place. */
    e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
    if (!e)
        return NULL;

    ds = data_size;
    page = e->pages;
    page_chain_for_each(page) {
        unsigned len = min_t(int, ds, PAGE_SIZE);
        data = kmap(page);
        rr = drbd_recv(mdev, data, len);
        if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
            dev_err(DEV, "Fault injection: Corrupting data on receive\n");
            data[0] = data[0] ^ (unsigned long)-1;
        }
        kunmap(page);
        if (rr != len) {
            drbd_free_ee(mdev, e);
            if (!signal_pending(current))
                dev_warn(DEV, "short read receiving data: read %d expected %d\n",
                    rr, len);
            return NULL;
        }
        ds -= rr;
    }

    if (dgs) {
        drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
        if (memcmp(dig_in, dig_vv, dgs)) {
            dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
                (unsigned long long)sector, data_size);
            drbd_bcast_ee(mdev, "digest failed",
                    dgs, dig_in, dig_vv, e);
            drbd_free_ee(mdev, e);
            return NULL;
        }
    }
    mdev->recv_cnt += data_size>>9;
    return e;
}
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
    struct page *page;
    int rr, rv = 1;
    void *data;

    if (!data_size)
        return true;

    page = drbd_pp_alloc(mdev, 1, 1);

    data = kmap(page);
    while (data_size) {
        rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
        if (rr != min_t(int, data_size, PAGE_SIZE)) {
            rv = 0;
            if (!signal_pending(current))
                dev_warn(DEV,
                    "short read receiving data: read %d expected %d\n",
                    rr, min_t(int, data_size, PAGE_SIZE));
            break;
        }
        data_size -= rr;
    }
    kunmap(page);
    drbd_pp_free(mdev, page, 0);
    return rv;
}
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
               sector_t sector, int data_size)
{
    struct bio_vec *bvec;
    struct bio *bio;
    int dgs, rr, i, expect;
    void *dig_in = mdev->int_dig_in;
    void *dig_vv = mdev->int_dig_vv;

    dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
        crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

    if (dgs) {
        rr = drbd_recv(mdev, dig_in, dgs);
        if (rr != dgs) {
            if (!signal_pending(current))
                dev_warn(DEV,
                    "short read receiving data reply digest: read %d expected %d\n",
                    rr, dgs);
            return 0;
        }
    }

    data_size -= dgs;

    /* optimistically update recv_cnt.  if receiving fails below,
     * we disconnect anyways, and counters will be reset. */
    mdev->recv_cnt += data_size>>9;

    bio = req->master_bio;
    D_ASSERT(sector == bio->bi_sector);

    bio_for_each_segment(bvec, bio, i) {
        expect = min_t(int, data_size, bvec->bv_len);
        rr = drbd_recv(mdev,
                 kmap(bvec->bv_page)+bvec->bv_offset,
                 expect);
        kunmap(bvec->bv_page);
        if (rr != expect) {
            if (!signal_pending(current))
                dev_warn(DEV, "short read receiving data reply: "
                    "read %d expected %d\n",
                    rr, expect);
            return 0;
        }
        data_size -= rr;
    }

    if (dgs) {
        drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
        if (memcmp(dig_in, dig_vv, dgs)) {
            dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
            return 0;
        }
    }

    D_ASSERT(data_size == 0);
    return 1;
}
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
    struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
    sector_t sector = e->sector;
    int ok;

    D_ASSERT(hlist_unhashed(&e->colision));

    if (likely((e->flags & EE_WAS_ERROR) == 0)) {
        drbd_set_in_sync(mdev, sector, e->size);
        ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
    } else {
        /* Record failure to sync */
        drbd_rs_failed_io(mdev, sector, e->size);

        ok = drbd_send_ack(mdev, P_NEG_ACK, e);
    }
    dec_unacked(mdev);

    return ok;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
    struct drbd_epoch_entry *e;

    e = read_in_block(mdev, ID_SYNCER, sector, data_size);
    if (!e)
        goto fail;

    dec_rs_pending(mdev);

    inc_unacked(mdev);
    /* corresponding dec_unacked() in e_end_resync_block()
     * respective _drbd_clear_done_ee */

    e->w.cb = e_end_resync_block;

    spin_lock_irq(&mdev->req_lock);
    list_add(&e->w.list, &mdev->sync_ee);
    spin_unlock_irq(&mdev->req_lock);

    atomic_add(data_size >> 9, &mdev->rs_sect_ev);
    if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
        return true;

    /* don't care for the reason here */
    dev_err(DEV, "submit failed, triggering re-connect\n");
    spin_lock_irq(&mdev->req_lock);
    list_del(&e->w.list);
    spin_unlock_irq(&mdev->req_lock);

    drbd_free_ee(mdev, e);
fail:
    put_ldev(mdev);
    return false;
}
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
    struct drbd_request *req;
    sector_t sector;
    int ok;
    struct p_data *p = &mdev->data.rbuf.data;

    sector = be64_to_cpu(p->sector);

    spin_lock_irq(&mdev->req_lock);
    req = _ar_id_to_req(mdev, p->block_id, sector);
    spin_unlock_irq(&mdev->req_lock);
    if (unlikely(!req)) {
        dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
        return false;
    }

    /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
     * special casing it there for the various failure cases.
     * still no race with drbd_fail_pending_reads */
    ok = recv_dless_read(mdev, req, sector, data_size);

    if (ok)
        req_mod(req, data_received);
    /* else: nothing. handled from drbd_disconnect...
     * I don't think we may complete this just yet
     * in case we are "on-disconnect: freeze" */

    return ok;
}
static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
    sector_t sector;
    int ok;
    struct p_data *p = &mdev->data.rbuf.data;

    sector = be64_to_cpu(p->sector);
    D_ASSERT(p->block_id == ID_SYNCER);

    if (get_ldev(mdev)) {
        /* data is submitted to disk within recv_resync_read.
         * corresponding put_ldev done below on error,
         * or in drbd_endio_write_sec. */
        ok = recv_resync_read(mdev, sector, data_size);
    } else {
        if (__ratelimit(&drbd_ratelimit_state))
            dev_err(DEV, "Can not write resync data to local disk.\n");

        ok = drbd_drain_block(mdev, data_size);

        drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
    }

    atomic_add(data_size >> 9, &mdev->rs_sect_in);

    return ok;
}
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
    struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
    sector_t sector = e->sector;
    int ok = 1, pcmd;

    if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
        if (likely((e->flags & EE_WAS_ERROR) == 0)) {
            pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
                mdev->state.conn <= C_PAUSED_SYNC_T &&
                e->flags & EE_MAY_SET_IN_SYNC) ?
                P_RS_WRITE_ACK : P_WRITE_ACK;
            ok &= drbd_send_ack(mdev, pcmd, e);
            if (pcmd == P_RS_WRITE_ACK)
                drbd_set_in_sync(mdev, sector, e->size);
        } else {
            ok = drbd_send_ack(mdev, P_NEG_ACK, e);
            /* we expect it to be marked out of sync anyways...
             * maybe assert this? */
        }
        dec_unacked(mdev);
    }
    /* we delete from the conflict detection hash _after_ we sent out the
     * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
    if (mdev->net_conf->two_primaries) {
        spin_lock_irq(&mdev->req_lock);
        D_ASSERT(!hlist_unhashed(&e->colision));
        hlist_del_init(&e->colision);
        spin_unlock_irq(&mdev->req_lock);
    } else {
        D_ASSERT(hlist_unhashed(&e->colision));
    }

    drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

    return ok;
}
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
    struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
    int ok;

    D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
    ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

    spin_lock_irq(&mdev->req_lock);
    D_ASSERT(!hlist_unhashed(&e->colision));
    hlist_del_init(&e->colision);
    spin_unlock_irq(&mdev->req_lock);

    dec_unacked(mdev);

    return ok;
}
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
    DEFINE_WAIT(wait);
    unsigned int p_seq;
    long timeout;
    int ret = 0;

    spin_lock(&mdev->peer_seq_lock);
    for (;;) {
        prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
        if (seq_le(packet_seq, mdev->peer_seq+1))
            break;
        if (signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }
        p_seq = mdev->peer_seq;
        spin_unlock(&mdev->peer_seq_lock);
        timeout = schedule_timeout(30*HZ);
        spin_lock(&mdev->peer_seq_lock);
        if (timeout == 0 && p_seq == mdev->peer_seq) {
            ret = -ETIMEDOUT;
            dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
            break;
        }
    }
    finish_wait(&mdev->seq_wait, &wait);
    if (mdev->peer_seq+1 == packet_seq)
        mdev->peer_seq++;
    spin_unlock(&mdev->peer_seq_lock);

    return ret;
}
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
    return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
        (dpf & DP_FUA ? REQ_FUA : 0) |
        (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
        (dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
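/* For example: a peer that submitted a write with REQ_FUA | REQ_FLUSH set
 * sends DP_FUA | DP_FLUSH in the data packet (via bio_flags_to_wire() on
 * its side), and this helper restores the same request flags before we
 * submit locally, so the ordering guarantees hold on both replicas. */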
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
    sector_t sector;
    struct drbd_epoch_entry *e;
    struct p_data *p = &mdev->data.rbuf.data;
    int rw = WRITE;
    u32 dp_flags;

    if (!get_ldev(mdev)) {
        spin_lock(&mdev->peer_seq_lock);
        if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
            mdev->peer_seq++;
        spin_unlock(&mdev->peer_seq_lock);

        drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
        atomic_inc(&mdev->current_epoch->epoch_size);
        return drbd_drain_block(mdev, data_size);
    }

    /* get_ldev(mdev) successful.
     * Corresponding put_ldev done either below (on various errors),
     * or in drbd_endio_write_sec, if we successfully submit the data at
     * the end of this function. */

    sector = be64_to_cpu(p->sector);
    e = read_in_block(mdev, p->block_id, sector, data_size);
    if (!e) {
        put_ldev(mdev);
        return false;
    }

    e->w.cb = e_end_block;

    dp_flags = be32_to_cpu(p->dp_flags);
    rw |= wire_flags_to_bio(mdev, dp_flags);

    if (dp_flags & DP_MAY_SET_IN_SYNC)
        e->flags |= EE_MAY_SET_IN_SYNC;

    spin_lock(&mdev->epoch_lock);
    e->epoch = mdev->current_epoch;
    atomic_inc(&e->epoch->epoch_size);
    atomic_inc(&e->epoch->active);
    spin_unlock(&mdev->epoch_lock);

    /* I'm the receiver, I do hold a net_cnt reference. */
    if (!mdev->net_conf->two_primaries) {
        spin_lock_irq(&mdev->req_lock);
    } else {
        /* don't get the req_lock yet,
         * we may sleep in drbd_wait_peer_seq */
        const int size = e->size;
        const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
        DEFINE_WAIT(wait);
        struct drbd_request *i;
        struct hlist_node *n;
        struct hlist_head *slot;
        int first;

        D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
        BUG_ON(mdev->ee_hash == NULL);
        BUG_ON(mdev->tl_hash == NULL);

        /* conflict detection and handling:
         * 1. wait on the sequence number,
         *    in case this data packet overtook ACK packets.
         * 2. check our hash tables for conflicting requests.
         *    we only need to walk the tl_hash, since an ee can not
         *    have a conflict with another ee: on the submitting
         *    node, the corresponding req had already been conflicting,
         *    and a conflicting req is never sent.
         *
         * Note: for two_primaries, we are protocol C,
         * so there cannot be any request that is DONE
         * but still on the transfer log.
         *
         * unconditionally add to the ee_hash.
         *
         * if no conflicting request is found:
         *    submit.
         *
         * if any conflicting request is found
         * that has not yet been acked,
         * AND I have the "discard concurrent writes" flag:
         *    queue (via done_ee) the P_DISCARD_ACK; OUT.
         *
         * if any conflicting request is found:
         *    block the receiver, waiting on misc_wait
         *    until no more conflicting requests are there,
         *    or we get interrupted (disconnect).
         *
         *    we do not just write after local io completion of those
         *    requests, but only after req is done completely, i.e.
         *    we wait for the P_DISCARD_ACK to arrive!
         *
         *    then proceed normally, i.e. submit.
         */
        if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
            goto out_interrupted;

        spin_lock_irq(&mdev->req_lock);

        hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
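        /* sketch: overlaps() presumably tests interval intersection, i.e.
         * two ranges [s1, s1+len1) and [s2, s2+len2) conflict iff
         * s1 < s2+len2 and s2 < s1+len1; here it compares the incoming
         * write against each request hashed on this tl_hash slot. */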
        slot = tl_hash_slot(mdev, sector);
        first = 1;
        for (;;) {
            int have_unacked = 0;
            int have_conflict = 0;
            prepare_to_wait(&mdev->misc_wait, &wait,
                TASK_INTERRUPTIBLE);
            hlist_for_each_entry(i, n, slot, colision) {
                if (OVERLAPS) {
                    /* only ALERT on first iteration,
                     * we may be woken up early... */
                    if (first)
                        dev_alert(DEV, "%s[%u] Concurrent local write detected!"
                              " new: %llus +%u; pending: %llus +%u\n",
                              current->comm, current->pid,
                              (unsigned long long)sector, size,
                              (unsigned long long)i->sector, i->size);
                    if (i->rq_state & RQ_NET_PENDING)
                        ++have_unacked;
                    ++have_conflict;
                }
            }
#undef OVERLAPS
            if (!have_conflict)
                break;

            /* Discard Ack only for the _first_ iteration */
            if (first && discard && have_unacked) {
                dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
                     (unsigned long long)sector);
                inc_unacked(mdev);
                e->w.cb = e_send_discard_ack;
                list_add_tail(&e->w.list, &mdev->done_ee);

                spin_unlock_irq(&mdev->req_lock);

                /* we could probably send that P_DISCARD_ACK ourselves,
                 * but I don't like the receiver using the msock */

                put_ldev(mdev);
                wake_asender(mdev);
                finish_wait(&mdev->misc_wait, &wait);
                return true;
            }

            if (signal_pending(current)) {
                hlist_del_init(&e->colision);

                spin_unlock_irq(&mdev->req_lock);

                finish_wait(&mdev->misc_wait, &wait);
                goto out_interrupted;
            }

            spin_unlock_irq(&mdev->req_lock);
            if (first) {
                first = 0;
                dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
                     "sec=%llus\n", (unsigned long long)sector);
            } else if (discard) {
                /* we had none on the first iteration.
                 * there must be none now. */
                D_ASSERT(have_unacked == 0);
            }
            schedule();
            spin_lock_irq(&mdev->req_lock);
        }
        finish_wait(&mdev->misc_wait, &wait);
    }

    list_add(&e->w.list, &mdev->active_ee);
    spin_unlock_irq(&mdev->req_lock);

    switch (mdev->net_conf->wire_protocol) {
    case DRBD_PROT_C:
        inc_unacked(mdev);
        /* corresponding dec_unacked() in e_end_block()
         * respective _drbd_clear_done_ee */
        break;
    case DRBD_PROT_B:
        /* I really don't like it that the receiver thread
         * sends on the msock, but anyways */
        drbd_send_ack(mdev, P_RECV_ACK, e);
        break;
    case DRBD_PROT_A:
        /* nothing to do */
        break;
    }

    if (mdev->state.pdsk < D_INCONSISTENT) {
        /* In case we have the only disk of the cluster, */
        drbd_set_out_of_sync(mdev, e->sector, e->size);
        e->flags |= EE_CALL_AL_COMPLETE_IO;
        e->flags &= ~EE_MAY_SET_IN_SYNC;
        drbd_al_begin_io(mdev, e->sector);
    }

    if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
        return true;

    /* don't care for the reason here */
    dev_err(DEV, "submit failed, triggering re-connect\n");
    spin_lock_irq(&mdev->req_lock);
    list_del(&e->w.list);
    hlist_del_init(&e->colision);
    spin_unlock_irq(&mdev->req_lock);
    if (e->flags & EE_CALL_AL_COMPLETE_IO)
        drbd_al_complete_io(mdev, e->sector);

out_interrupted:
    drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
    put_ldev(mdev);
    drbd_free_ee(mdev, e);
    return false;
}
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * activity (more than 64 sectors) that we cannot account for with our own
 * resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
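/* Worked example (illustrative numbers only): if the mark taken two steps
 * ago recorded rs_mark_left = 20000 bitmap bits and rs_left is now 12000
 * bits, with dt = 6 seconds between then and now, then db = 8000 bits and
 * dbdt = Bit2KB(8000/6) ~= 5332 KB/s (each bitmap bit covers 4 KiB);
 * resync gets throttled once that exceeds the configured c_min_rate. */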
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
    struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
    unsigned long db, dt, dbdt;
    struct lc_element *tmp;
    int curr_events;
    int throttle = 0;

    /* feature disabled? */
    if (mdev->sync_conf.c_min_rate == 0)
        return 0;

    spin_lock_irq(&mdev->al_lock);
    tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
    if (tmp) {
        struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
        if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
            spin_unlock_irq(&mdev->al_lock);
            return 0;
        }
        /* Do not slow down if app IO is already waiting for this extent */
    }
    spin_unlock_irq(&mdev->al_lock);

    curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
              (int)part_stat_read(&disk->part0, sectors[1]) -
            atomic_read(&mdev->rs_sect_ev);

    if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
        unsigned long rs_left;
        int i;

        mdev->rs_last_events = curr_events;

        /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
         * approximately */
        i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

        if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
            rs_left = mdev->ov_left;
        else
            rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

        dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
        if (!dt)
            dt++;
        db = mdev->rs_mark_left[i] - rs_left;
        dbdt = Bit2KB(db/dt);

        if (dbdt > mdev->sync_conf.c_min_rate)
            throttle = 1;
    }
    return throttle;
}
static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
{
    sector_t sector;
    const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
    struct drbd_epoch_entry *e;
    struct digest_info *di = NULL;
    int size, verb;
    unsigned int fault_type;
    struct p_block_req *p = &mdev->data.rbuf.block_req;

    sector = be64_to_cpu(p->sector);
    size   = be32_to_cpu(p->blksize);

    if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
        dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
                (unsigned long long)sector, size);
        return false;
    }
    if (sector + (size>>9) > capacity) {
        dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
                (unsigned long long)sector, size);
        return false;
    }

    if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
        verb = 1;
        switch (cmd) {
        case P_DATA_REQUEST:
            drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
            break;
        case P_RS_DATA_REQUEST:
        case P_CSUM_RS_REQUEST:
        case P_OV_REQUEST:
            drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
            break;
        case P_OV_REPLY:
            verb = 0;
            dec_rs_pending(mdev);
            drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
            break;
        default:
            dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
                cmdname(cmd));
        }
        if (verb && __ratelimit(&drbd_ratelimit_state))
            dev_err(DEV, "Can not satisfy peer's read request, "
                "no local data.\n");

        /* drain possible payload */
        return drbd_drain_block(mdev, digest_size);
    }

    /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
     * "criss-cross" setup, that might cause write-out on some other DRBD,
     * which in turn might block on the other node at this very place. */
    e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
    if (!e) {
        put_ldev(mdev);
        return false;
    }

    switch (cmd) {
    case P_DATA_REQUEST:
        e->w.cb = w_e_end_data_req;
        fault_type = DRBD_FAULT_DT_RD;
        /* application IO, don't drbd_rs_begin_io */
        goto submit;

    case P_RS_DATA_REQUEST:
        e->w.cb = w_e_end_rsdata_req;
        fault_type = DRBD_FAULT_RS_RD;
        /* used in the sector offset progress display */
        mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
        break;

    case P_OV_REPLY:
    case P_CSUM_RS_REQUEST:
        fault_type = DRBD_FAULT_RS_RD;
        di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
        if (!di)
            goto out_free_e;

        di->digest_size = digest_size;
        di->digest = (((char *)di)+sizeof(struct digest_info));

        e->digest = di;
        e->flags |= EE_HAS_DIGEST;

        if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
            goto out_free_e;

        if (cmd == P_CSUM_RS_REQUEST) {
            D_ASSERT(mdev->agreed_pro_version >= 89);
            e->w.cb = w_e_end_csum_rs_req;
            /* used in the sector offset progress display */
            mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
        } else if (cmd == P_OV_REPLY) {
            /* track progress, we may need to throttle */
            atomic_add(size >> 9, &mdev->rs_sect_in);
            e->w.cb = w_e_end_ov_reply;
            dec_rs_pending(mdev);
            /* drbd_rs_begin_io done when we sent this request,
             * but accounting still needs to be done. */
            goto submit_for_resync;
        }
        break;

    case P_OV_REQUEST:
        if (mdev->ov_start_sector == ~(sector_t)0 &&
            mdev->agreed_pro_version >= 90) {
            unsigned long now = jiffies;
            int i;
            mdev->ov_start_sector = sector;
            mdev->ov_position = sector;
            mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
            mdev->rs_total = mdev->ov_left;
            for (i = 0; i < DRBD_SYNC_MARKS; i++) {
                mdev->rs_mark_left[i] = mdev->ov_left;
                mdev->rs_mark_time[i] = now;
            }
            dev_info(DEV, "Online Verify start sector: %llu\n",
                    (unsigned long long)sector);
        }
        e->w.cb = w_e_end_ov_req;
        fault_type = DRBD_FAULT_RS_RD;
        break;

    default:
        dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
            cmdname(cmd));
        fault_type = DRBD_FAULT_MAX;
        goto out_free_e;
    }

    /* Throttle, drbd_rs_begin_io and submit should become asynchronous
     * wrt the receiver, but it is not as straightforward as it may seem.
     * Various places in the resync start and stop logic assume resync
     * requests are processed in order, requeuing this on the worker thread
     * introduces a bunch of new code for synchronization between threads.
     *
     * Unlimited throttling before drbd_rs_begin_io may stall the resync
     * "forever", throttling after drbd_rs_begin_io will lock that extent
     * for application writes for the same time. For now, just throttle
     * here, where the rest of the code expects the receiver to sleep for
     * a while, anyways.
     */

    /* Throttle before drbd_rs_begin_io, as that locks out application IO;
     * this defers syncer requests for some time, before letting at least
     * one request through. The resync controller on the receiving side
     * will adapt to the incoming rate accordingly.
     *
     * We cannot throttle here if remote is Primary/SyncTarget:
     * we would also throttle its application reads.
     * In that case, throttling is done on the SyncTarget only. */
    if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
        schedule_timeout_uninterruptible(HZ/10);
    if (drbd_rs_begin_io(mdev, sector))
        goto out_free_e;

submit_for_resync:
    atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
    inc_unacked(mdev);
    spin_lock_irq(&mdev->req_lock);
    list_add_tail(&e->w.list, &mdev->read_ee);
    spin_unlock_irq(&mdev->req_lock);

    if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
        return true;

    /* don't care for the reason here */
    dev_err(DEV, "submit failed, triggering re-connect\n");
    spin_lock_irq(&mdev->req_lock);
    list_del(&e->w.list);
    spin_unlock_irq(&mdev->req_lock);
    /* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
    put_ldev(mdev);
    drbd_free_ee(mdev, e);
    return false;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
    int self, peer, rv = -100;
    unsigned long ch_self, ch_peer;

    self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
    peer = mdev->p_uuid[UI_BITMAP] & 1;

    ch_peer = mdev->p_uuid[UI_SIZE];
    ch_self = mdev->comm_bm_set;

    switch (mdev->net_conf->after_sb_0p) {
    case ASB_CONSENSUS:
    case ASB_DISCARD_SECONDARY:
    case ASB_CALL_HELPER:
        dev_err(DEV, "Configuration error.\n");
        break;
    case ASB_DISCONNECT:
        break;
    case ASB_DISCARD_YOUNGER_PRI:
        if (self == 0 && peer == 1) {
            rv = -1;
            break;
        }
        if (self == 1 && peer == 0) {
            rv = 1;
            break;
        }
        /* Else fall through to one of the other strategies... */
    case ASB_DISCARD_OLDER_PRI:
        if (self == 0 && peer == 1) {
            rv = 1;
            break;
        }
        if (self == 1 && peer == 0) {
            rv = -1;
            break;
        }
        /* Else fall through to one of the other strategies... */
        dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
             "Using discard-least-changes instead\n");
    case ASB_DISCARD_ZERO_CHG:
        if (ch_peer == 0 && ch_self == 0) {
            rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
                ? -1 : 1;
            break;
        } else {
            if (ch_peer == 0) { rv =  1; break; }
            if (ch_self == 0) { rv = -1; break; }
        }
        if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
            break;
    case ASB_DISCARD_LEAST_CHG:
        if (ch_self < ch_peer)
            rv = -1;
        else if (ch_self > ch_peer)
            rv = 1;
        else /* ( ch_self == ch_peer ) */
             /* Well, then use something else. */
            rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
                ? -1 : 1;
        break;
    case ASB_DISCARD_LOCAL:
        rv = -1;
        break;
    case ASB_DISCARD_REMOTE:
        rv = 1;
    }

    return rv;
}
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
    int hg, rv = -100;

    switch (mdev->net_conf->after_sb_1p) {
    case ASB_DISCARD_YOUNGER_PRI:
    case ASB_DISCARD_OLDER_PRI:
    case ASB_DISCARD_LEAST_CHG:
    case ASB_DISCARD_LOCAL:
    case ASB_DISCARD_REMOTE:
        dev_err(DEV, "Configuration error.\n");
        break;
    case ASB_DISCONNECT:
        break;
    case ASB_CONSENSUS:
        hg = drbd_asb_recover_0p(mdev);
        if (hg == -1 && mdev->state.role == R_SECONDARY)
            rv = hg;
        if (hg == 1 && mdev->state.role == R_PRIMARY)
            rv = hg;
        break;
    case ASB_VIOLENTLY:
        rv = drbd_asb_recover_0p(mdev);
        break;
    case ASB_DISCARD_SECONDARY:
        return mdev->state.role == R_PRIMARY ? 1 : -1;
    case ASB_CALL_HELPER:
        hg = drbd_asb_recover_0p(mdev);
        if (hg == -1 && mdev->state.role == R_PRIMARY) {
            enum drbd_state_rv rv2;

            drbd_set_role(mdev, R_SECONDARY, 0);
            /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
             * we might be here in C_WF_REPORT_PARAMS which is transient.
             * we do not need to wait for the after state change work either. */
            rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
            if (rv2 != SS_SUCCESS) {
                drbd_khelper(mdev, "pri-lost-after-sb");
            } else {
                dev_warn(DEV, "Successfully gave up primary role.\n");
                rv = hg;
            }
        } else
            rv = hg;
    }

    return rv;
}
*mdev
) __must_hold(local
)
2254 switch (mdev
->net_conf
->after_sb_2p
) {
2255 case ASB_DISCARD_YOUNGER_PRI
:
2256 case ASB_DISCARD_OLDER_PRI
:
2257 case ASB_DISCARD_LEAST_CHG
:
2258 case ASB_DISCARD_LOCAL
:
2259 case ASB_DISCARD_REMOTE
:
2261 case ASB_DISCARD_SECONDARY
:
2262 dev_err(DEV
, "Configuration error.\n");
2265 rv
= drbd_asb_recover_0p(mdev
);
2267 case ASB_DISCONNECT
:
2269 case ASB_CALL_HELPER
:
2270 hg
= drbd_asb_recover_0p(mdev
);
2272 enum drbd_state_rv rv2
;
2274 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2275 * we might be here in C_WF_REPORT_PARAMS which is transient.
2276 * we do not need to wait for the after state change work either. */
2277 rv2
= drbd_change_state(mdev
, CS_VERBOSE
, NS(role
, R_SECONDARY
));
2278 if (rv2
!= SS_SUCCESS
) {
2279 drbd_khelper(mdev
, "pri-lost-after-sb");
2281 dev_warn(DEV
, "Successfully gave up primary role.\n");
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
               u64 bits, u64 flags)
{
    if (!uuid) {
        dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
        return;
    }
    dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
         text,
         (unsigned long long)uuid[UI_CURRENT],
         (unsigned long long)uuid[UI_BITMAP],
         (unsigned long long)uuid[UI_HISTORY_START],
         (unsigned long long)uuid[UI_HISTORY_END],
         (unsigned long long)bits,
         (unsigned long long)flags);
}
/*
  100   after split brain try auto recover
    2   C_SYNC_SOURCE set BitMap
    1   C_SYNC_SOURCE use BitMap
    0   no Sync
   -1   C_SYNC_TARGET use BitMap
   -2   C_SYNC_TARGET set BitMap
 -100   after split brain, disconnect
-1000   unrelated data
-1091   requires proto 91
-1096   requires proto 96
 */
2320 static int drbd_uuid_compare(struct drbd_conf
*mdev
, int *rule_nr
) __must_hold(local
)
2325 self
= mdev
->ldev
->md
.uuid
[UI_CURRENT
] & ~((u64
)1);
2326 peer
= mdev
->p_uuid
[UI_CURRENT
] & ~((u64
)1);
2329 if (self
== UUID_JUST_CREATED
&& peer
== UUID_JUST_CREATED
)
2333 if ((self
== UUID_JUST_CREATED
|| self
== (u64
)0) &&
2334 peer
!= UUID_JUST_CREATED
)
2338 if (self
!= UUID_JUST_CREATED
&&
2339 (peer
== UUID_JUST_CREATED
|| peer
== (u64
)0))
2343 int rct
, dc
; /* roles at crash time */
2345 if (mdev
->p_uuid
[UI_BITMAP
] == (u64
)0 && mdev
->ldev
->md
.uuid
[UI_BITMAP
] != (u64
)0) {
2347 if (mdev
->agreed_pro_version
< 91)
2350 if ((mdev
->ldev
->md
.uuid
[UI_BITMAP
] & ~((u64
)1)) == (mdev
->p_uuid
[UI_HISTORY_START
] & ~((u64
)1)) &&
2351 (mdev
->ldev
->md
.uuid
[UI_HISTORY_START
] & ~((u64
)1)) == (mdev
->p_uuid
[UI_HISTORY_START
+ 1] & ~((u64
)1))) {
2352 dev_info(DEV
, "was SyncSource, missed the resync finished event, corrected myself:\n");
2353 drbd_uuid_set_bm(mdev
, 0UL);
2355 drbd_uuid_dump(mdev
, "self", mdev
->ldev
->md
.uuid
,
2356 mdev
->state
.disk
>= D_NEGOTIATING
? drbd_bm_total_weight(mdev
) : 0, 0);
2359 dev_info(DEV
, "was SyncSource (peer failed to write sync_uuid)\n");
2366 if (mdev
->ldev
->md
.uuid
[UI_BITMAP
] == (u64
)0 && mdev
->p_uuid
[UI_BITMAP
] != (u64
)0) {
2368 if (mdev
->agreed_pro_version
< 91)
2371 if ((mdev
->ldev
->md
.uuid
[UI_HISTORY_START
] & ~((u64
)1)) == (mdev
->p_uuid
[UI_BITMAP
] & ~((u64
)1)) &&
2372 (mdev
->ldev
->md
.uuid
[UI_HISTORY_START
+ 1] & ~((u64
)1)) == (mdev
->p_uuid
[UI_HISTORY_START
] & ~((u64
)1))) {
2373 dev_info(DEV
, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2375 mdev
->p_uuid
[UI_HISTORY_START
+ 1] = mdev
->p_uuid
[UI_HISTORY_START
];
2376 mdev
->p_uuid
[UI_HISTORY_START
] = mdev
->p_uuid
[UI_BITMAP
];
2377 mdev
->p_uuid
[UI_BITMAP
] = 0UL;
2379 drbd_uuid_dump(mdev
, "peer", mdev
->p_uuid
, mdev
->p_uuid
[UI_SIZE
], mdev
->p_uuid
[UI_FLAGS
]);
2382 dev_info(DEV
, "was SyncTarget (failed to write sync_uuid)\n");
2389 /* Common power [off|failure] */
2390 rct
= (test_bit(CRASHED_PRIMARY
, &mdev
->flags
) ? 1 : 0) +
2391 (mdev
->p_uuid
[UI_FLAGS
] & 2);
2392 /* lowest bit is set when we were primary,
2393 * next bit (weight 2) is set when peer was primary */
2397 case 0: /* !self_pri && !peer_pri */ return 0;
2398 case 1: /* self_pri && !peer_pri */ return 1;
2399 case 2: /* !self_pri && peer_pri */ return -1;
2400 case 3: /* self_pri && peer_pri */
2401 dc
= test_bit(DISCARD_CONCURRENT
, &mdev
->flags
);
2407 peer
= mdev
->p_uuid
[UI_BITMAP
] & ~((u64
)1);
2412 peer
= mdev
->p_uuid
[UI_HISTORY_START
] & ~((u64
)1);
2414 if (mdev
->agreed_pro_version
< 96 ?
2415 (mdev
->ldev
->md
.uuid
[UI_HISTORY_START
] & ~((u64
)1)) ==
2416 (mdev
->p_uuid
[UI_HISTORY_START
+ 1] & ~((u64
)1)) :
2417 peer
+ UUID_NEW_BM_OFFSET
== (mdev
->p_uuid
[UI_BITMAP
] & ~((u64
)1))) {
2418 /* The last P_SYNC_UUID did not get though. Undo the last start of
2419 resync as sync source modifications of the peer's UUIDs. */
2421 if (mdev
->agreed_pro_version
< 91)
2424 mdev
->p_uuid
[UI_BITMAP
] = mdev
->p_uuid
[UI_HISTORY_START
];
2425 mdev
->p_uuid
[UI_HISTORY_START
] = mdev
->p_uuid
[UI_HISTORY_START
+ 1];
2427 dev_info(DEV
, "Did not got last syncUUID packet, corrected:\n");
2428 drbd_uuid_dump(mdev
, "peer", mdev
->p_uuid
, mdev
->p_uuid
[UI_SIZE
], mdev
->p_uuid
[UI_FLAGS
]);
2435 self
= mdev
->ldev
->md
.uuid
[UI_CURRENT
] & ~((u64
)1);
2436 for (i
= UI_HISTORY_START
; i
<= UI_HISTORY_END
; i
++) {
2437 peer
= mdev
->p_uuid
[i
] & ~((u64
)1);
2443 self
= mdev
->ldev
->md
.uuid
[UI_BITMAP
] & ~((u64
)1);
2444 peer
= mdev
->p_uuid
[UI_CURRENT
] & ~((u64
)1);
2449 self
= mdev
->ldev
->md
.uuid
[UI_HISTORY_START
] & ~((u64
)1);
2451 if (mdev
->agreed_pro_version
< 96 ?
2452 (mdev
->ldev
->md
.uuid
[UI_HISTORY_START
+ 1] & ~((u64
)1)) ==
2453 (mdev
->p_uuid
[UI_HISTORY_START
] & ~((u64
)1)) :
2454 self
+ UUID_NEW_BM_OFFSET
== (mdev
->ldev
->md
.uuid
[UI_BITMAP
] & ~((u64
)1))) {
2455 /* The last P_SYNC_UUID did not get though. Undo the last start of
2456 resync as sync source modifications of our UUIDs. */
2458 if (mdev
->agreed_pro_version
< 91)
2461 _drbd_uuid_set(mdev
, UI_BITMAP
, mdev
->ldev
->md
.uuid
[UI_HISTORY_START
]);
2462 _drbd_uuid_set(mdev
, UI_HISTORY_START
, mdev
->ldev
->md
.uuid
[UI_HISTORY_START
+ 1]);
2464 dev_info(DEV
, "Last syncUUID did not get through, corrected:\n");
2465 drbd_uuid_dump(mdev
, "self", mdev
->ldev
->md
.uuid
,
2466 mdev
->state
.disk
>= D_NEGOTIATING
? drbd_bm_total_weight(mdev
) : 0, 0);
2474 peer
= mdev
->p_uuid
[UI_CURRENT
] & ~((u64
)1);
2475 for (i
= UI_HISTORY_START
; i
<= UI_HISTORY_END
; i
++) {
2476 self
= mdev
->ldev
->md
.uuid
[i
] & ~((u64
)1);
2482 self
= mdev
->ldev
->md
.uuid
[UI_BITMAP
] & ~((u64
)1);
2483 peer
= mdev
->p_uuid
[UI_BITMAP
] & ~((u64
)1);
2484 if (self
== peer
&& self
!= ((u64
)0))
2488 for (i
= UI_HISTORY_START
; i
<= UI_HISTORY_END
; i
++) {
2489 self
= mdev
->ldev
->md
.uuid
[i
] & ~((u64
)1);
2490 for (j
= UI_HISTORY_START
; j
<= UI_HISTORY_END
; j
++) {
2491 peer
= mdev
->p_uuid
[j
] & ~((u64
)1);
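
/* Worked example for the "common crash" case (rule_nr 40): if this node has
 * CRASHED_PRIMARY set and bit 1 of the peer's UI_FLAGS is clear, then
 * rct = 1 + 0 = 1, i.e. only we were primary at crash time, so we become
 * sync source (return 1).  With rct == 3 (both were primary) the tie is
 * broken by DISCARD_CONCURRENT, which is set on exactly one side. */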
/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		default:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
			     "assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}
/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}
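
/* Example: "discard-local" on this side is only compatible with
 * "discard-remote" on the peer (and vice versa).  Two nodes that both say
 * "discard-local" would each throw away their own data, and two that both
 * say "discard-remote" would each overwrite the peer, so those combinations
 * are rejected; all other policies must simply match on both sides. */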
static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_protocol *p = &mdev->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose	= cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return false;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return true;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}
/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
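
/* Typical call site, as in receive_SyncParam() below:
 *	tfm = drbd_crypto_alloc_digest_safe(mdev, p->verify_alg, "verify-alg");
 * The caller only needs IS_ERR(tfm) to distinguish failure from the two
 * valid outcomes: NULL (empty alg name, feature unused) or a usable tfm. */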
static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
	int ok = true;
	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    packet_size, exp_max_sz);
		return false;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return false;

	mdev->sync_conf.rate	  = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
				    "peer wants %u, accepting only %u byte\n",
						data_size, SHARED_SECRET_MAX);
				return false;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return false;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate	  = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kmalloc of fifo_buffer failed");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size   = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;
disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return false;
}
static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ */
}
/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}
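
/* d > (a>>3) is "difference greater than an eighth of a", i.e. 12.5%.
 * Illustrative values: for a = 1000000 and b = 860000 sectors, d = 140000
 * exceeds a>>3 = 125000, so the warning fires; at b = 880000 it stays
 * below the threshold and nothing is logged. */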
static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_sizes *p = &mdev->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	unsigned int max_bio_size;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determin_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return false;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		if (mdev->agreed_pro_version < 94)
			max_bio_size = be32_to_cpu(p->max_bio_size);
		else if (mdev->agreed_pro_version == 94)
			max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			max_bio_size = DRBD_MAX_BIO_SIZE;

		if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
			drbd_setup_queue_param(mdev, max_bio_size);

		drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return true;
}
static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_uuids *p = &mdev->data.rbuf.uuids;
	u64 *p_uuid;
	int i, updated_uuids = 0;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids",
					BM_LOCKED_TEST_ALLOWED);
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
			updated_uuids = 1;
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until an eventually
	   ongoing cluster wide state change is finished. That is important if
	   we are primary and are detaching from our disk. We need to see the
	   new disk state... */
	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	if (updated_uuids)
		drbd_print_uuids(mdev, "receiver updated UUIDs to");

	return true;
}
/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S]       = C_VERIFY_T,
		[C_MASK]   = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}
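
/* Example of the mirroring: if the peer reports role Primary, peer
 * Secondary, disk UpToDate, pdsk Inconsistent, convert_state() hands us
 * role Secondary, peer Primary, disk Inconsistent, pdsk UpToDate --
 * the same situation, just seen from this node. */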
static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_req_state *p = &mdev->data.rbuf.req_state;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return true;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);

	drbd_send_sr_reply(mdev, rv);
	drbd_md_sync(mdev);

	return true;
}
static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_state *p = &mdev->data.rbuf.state;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->req_lock);
 retry:
	os = ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	/* peer says his disk is uptodate, while we think it is inconsistent,
	 * and this happens while we think we have a sync going on. */
	if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return true;
		}
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
				(peer_state.conn >= C_STARTING_SYNC_S &&
				 peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
					return false;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return false;
			}
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.i != os.i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
		return false;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS) {
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return false;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_state(mdev);
		}
	}

	mdev->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return true;
}
static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_print_uuids(mdev, "updated sync uuid");
		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return true;
}
/**
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
		     unsigned long *buffer, struct bm_xfer_ctx *c)
{
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);
	int err;

	if (want != data_size) {
		dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
		return -EIO;
	}
	if (want == 0)
		return 0;
	err = drbd_recv(mdev, buffer, want);
	if (err != want) {
		if (err >= 0)
			err = -EIO;
		return err;
	}

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return 1;
}
/**
 * recv_bm_rle_bits
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
	int toggle = DCBP_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return -EIO;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return -EIO;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return -EIO;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return -EIO;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return -EIO;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s != c->bm_bits);
}
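
/* Worked example: a packet encoding "17 clear bits, then 3 set bits, ..."
 * with DCBP_get_start(p) == 0 first decodes rl = 17 with toggle == 0
 * (skipped), then rl = 3 with toggle == 1, which sets bits s..s+2 via
 * _drbd_bm_set_bits().  Each run length is stored in as few bits as the
 * VLI code allows, which is what makes sparse bitmaps transfer so small. */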
/**
 * decode_bitmap_c
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	if (DCBP_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return -EIO;
}
void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header80) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
		                    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}
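
/* Illustrative numbers: with bm_words = 32768 eight-byte words, plain is
 * roughly 262144 bytes plus per-packet header overhead.  If the RLE
 * transfer took total = 2048 bytes, r = 1000 * 2048 / 262144 ~= 7, so
 * 1000 - 7 = 993 is reported as "compression: 99.3%". */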
/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we would use big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct bm_xfer_ctx c;
	void *buffer;
	int err;
	int ok = false;
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
	/* you are supposed to send additional out-of-sync information
	 * if you actually set bits during this phase */

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	buffer	 = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	for(;;) {
		if (cmd == P_BITMAP) {
			err = receive_bitmap_plain(mdev, data_size, buffer, &c);
		} else if (cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buff */
			p = buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
				goto out;
			if (data_size <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
				goto out;
			}
			err = decode_bitmap_c(mdev, p, &c);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
			goto out;
		}

		c.packets[cmd == P_BITMAP]++;
		c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;

		if (err <= 0) {
			if (err < 0)
				goto out;
			break;
		}

		if (!drbd_recv_header(mdev, &cmd, &data_size))
			goto out;
	}

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(rv == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}

	ok = true;
 out:
	drbd_bm_unlock(mdev);
	if (ok && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	free_page((unsigned long) buffer);
	return ok;
}
static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
		 cmd, data_size);

	size = data_size;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(mdev, sink, want);
		ERR_IF(r <= 0) break;
		size -= r;
	}
	return size == 0;
}
static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->data.socket);

	return true;
}
static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_block_desc *p = &mdev->data.rbuf.block_desc;

	switch (mdev->state.conn) {
	case C_WF_SYNC_UUID:
	case C_WF_BITMAP_T:
	case C_BEHIND:
			break;
	default:
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
				drbd_conn_str(mdev->state.conn));
	}

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return true;
}
typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	drbd_cmd_handler_f function;
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply } ,
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier } ,
	[P_BITMAP]	    = { 1, sizeof(struct p_header80), receive_bitmap } ,
	[P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
	[P_UNPLUG_REMOTE]   = { 0, sizeof(struct p_header80), receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
	[P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
	/* anything missing from this table is in
	 * the asender_tbl, see get_asender_cmd */
	[P_MAX_CMD]	    = { 0, 0, NULL },
};
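
/* The table is indexed by packet type: pkt_size is how much of the packet
 * (the fixed sub-header) drbdd() reads into the receive buffer before
 * calling the handler, and expect_payload says whether additional payload
 * bytes, which the handler must consume itself via drbd_recv(), may follow. */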
/* All handler functions that expect a sub-header get that sub-header in
   mdev->data.rbuf.header.head.payload.

   Usually in mdev->data.rbuf.header.head the callback can find the usual
   p_header, but they may not rely on that. Since there is also p_header95 !
 */

static void drbdd(struct drbd_conf *mdev)
{
	union p_header *header = &mdev->data.rbuf.header;
	unsigned int packet_size;
	enum drbd_packets cmd;
	size_t shs; /* sub header size */
	int rv;

	while (get_t_state(&mdev->receiver) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (!drbd_recv_header(mdev, &cmd, &packet_size))
			goto err_out;

		if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
			goto err_out;
		}

		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
			goto err_out;
		}

		if (shs) {
			rv = drbd_recv(mdev, &header->h80.payload, shs);
			if (unlikely(rv != shs)) {
				if (!signal_pending(current))
					dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
				goto err_out;
			}
		}

		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);

		if (unlikely(!rv)) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
			    cmdname(cmd), packet_size);
			goto err_out;
		}
	}

	if (0) {
	err_out:
		drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	}
	/* If we leave here, we probably want to update at least the
	 * "Connected" indicator on stable storage. Do so explicitly here. */
	drbd_md_sync(mdev);
}
void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&mdev->data.work, &barr.w);
	wait_for_completion(&barr.done);
}
void drbd_free_tl_hash(struct drbd_conf *mdev)
{
	struct hlist_head *h;

	spin_lock_irq(&mdev->req_lock);

	if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
		spin_unlock_irq(&mdev->req_lock);
		return;
	}
	/* paranoia code */
	for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
				(int)(h - mdev->ee_hash), h->first);
	kfree(mdev->ee_hash);
	mdev->ee_hash = NULL;
	mdev->ee_hash_s = 0;

	/* paranoia code */
	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
				(int)(h - mdev->tl_hash), h->first);
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
	spin_unlock_irq(&mdev->req_lock);
}
static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->asender);
	drbd_free_sock(mdev);

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee().  If we do this too early, we might
	 * miss some resync ee and pages.*/
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
		drbd_try_outdate_peer_async(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	if (os.conn == C_DISCONNECTING) {
		wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);

		crypto_free_hash(mdev->cram_hmac_tfm);
		mdev->cram_hmac_tfm = NULL;

		kfree(mdev->net_conf);
		mdev->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}
/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->data.socket == NULL) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
			     (struct p_header80 *)p, sizeof(*p), 0 );
	mutex_unlock(&mdev->data.mutex);
	return ok;
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	unsigned int length;
	enum drbd_packets cmd;
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		return 0;

	if (cmd != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
		     cmdname(cmd), cmd);
		return -1;
	}

	if (length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
		     expect, length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
	    PRO_VERSION_MIN, PRO_VERSION_MAX,
	    p->protocol_min, p->protocol_max);
	return -1;
}
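
/* Negotiation example (illustrative version numbers): with a local range
 * of, say, 86..96 and a peer announcing 90..100, the ranges overlap and
 * both sides agree on min(96, 100) = 96.  A peer announcing only 97..100
 * fails the overlap test above and we go standalone (return -1). */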
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	enum drbd_packets cmd;
	unsigned int length;
	int rv;

	desc.tfm = mdev->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
				(u8 *)mdev->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
		    cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, length);

	if (rv != length) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length != resp_size) {
		dev_err(DEV, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
		     resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
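
/* The exchange above is a symmetric challenge-response: each side sends a
 * random 64 byte challenge, answers the peer's challenge with
 * HMAC(shared_secret, peer_challenge), then recomputes the HMAC over its
 * own challenge and compares.  The secret itself never crosses the wire,
 * but note that this only authenticates the peer; it does not encrypt or
 * integrity-protect the data stream that follows. */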
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	unsigned int minor = mdev_to_minor(mdev);
	int h;

	sprintf(current->comm, "drbd%d_receiver", minor);

	dev_info(DEV, "receiver (re)started\n");

	do {
		h = drbd_connect(mdev);
		if (h == 0) {
			drbd_disconnect(mdev);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			dev_warn(DEV, "Discarding network configuration.\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	} while (h == 0);

	if (h > 0) {
		if (get_net_conf(mdev)) {
			drbdd(mdev);
			put_net_conf(mdev);
		}
	}

	drbd_disconnect(mdev);

	dev_info(DEV, "receiver terminated\n");
	return 0;
}
/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return true;
}
static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}

static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return true;
}
static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return true;
}
/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	return NULL;
}
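
/* block_id round-trip: for data packets we send the request pointer itself
 * as the block_id, so the lookup above is simply comparing the id against
 * each request pointer in the hash slot; the additional sector check only
 * guards against a stale or corrupted id that aliases a live request. */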
typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);

		dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
			(void *)(unsigned long)id, (unsigned long long)sector);
		return false;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}
static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return true;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return false;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}

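/* P_NEG_ACK: the peer could not satisfy this write; mark the area out
 * of sync (or the resync request as failed) so a later resync covers
 * it again. */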
static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	struct drbd_request *req;
	struct bio_and_error m;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}

	spin_lock_irq(&mdev->req_lock);
	req = _ack_id_to_req(mdev, p->block_id, sector);
	if (!req) {
		spin_unlock_irq(&mdev->req_lock);
		if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
		    mdev->net_conf->wire_protocol == DRBD_PROT_B) {
			/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
			   The master bio might already be completed, therefore the
			   request is no longer in the collision hash.
			   => Do not try to validate block_id as request. */
			/* In Protocol B we might already have got a P_RECV_ACK
			   but then get a P_NEG_ACK afterwards. */
			drbd_set_out_of_sync(mdev, sector, size);
			return true;
		} else {
			dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
				(void *)(unsigned long)p->block_id, (unsigned long long)sector);
			return false;
		}
	}
	__req_mod(req, neg_acked, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}

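/* Negative reply to a resync or online-verify data request.  This
 * handler serves both P_NEG_RS_DREPLY and P_RS_CANCEL (see the dispatch
 * table below); only P_NEG_RS_DREPLY marks the area as failed resync IO. */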
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (be16_to_cpu(h->command)) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through */
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}

static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
		mdev->start_resync_timer.expires = jiffies + HZ;
		add_timer(&mdev->start_resync_timer);
	}

	return true;
}

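/* One online-verify reply per verify request: block_id encodes whether
 * the peer found the block out of sync.  Once the last reply arrived
 * (ov_left == 0), finishing the verify run is handed off to the worker. */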
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}

static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* consume and ignore the packet (P_DELAY_PROBE) */
	return true;
}

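/* Dispatch table entry for the asender: the expected on-the-wire packet
 * size (header included) and the handler to call once that many bytes
 * have been received. */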
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}

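/* The asender thread: sends out pings and acknowledgments and receives
 * the peer's acknowledgments on the meta-data socket, dispatching them
 * through the table above.  It runs with realtime priority so ACKs do
 * not starve behind bulk data traffic. */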
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv  < expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

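		/* Two-stage parse: first a complete header arrives and tells
		 * us command and packet size; then, once the whole packet is
		 * in, the handler runs and we reset for the next header. */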
		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}