/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
/*
 * Some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
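/* Illustrative sketch (not part of the driver): a chain of three pages,
 * each link stored in page->private and the tail terminated by
 * set_page_private(tail, 0), looks like
 *
 *	head --> pageA --> pageB --> pageC --> NULL
 *
 * where each arrow is page_chain_next(page), assumed here to be simply
 * (struct page *)page_private(page). */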
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page = *head;
	struct page *tmp;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}
static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}
static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished entry we
	   can stop examining the list... */
	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}
/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}
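/* Usage sketch: callers pair this with drbd_pp_free(). For example,
 * drbd_alloc_ee() below requests one chain per epoch entry:
 *
 *	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
 *
 * and that chain travels with the epoch entry until drbd_free_some_ee()
 * hands it back via drbd_pp_free(). */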
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}
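/* Sizing note: the high-water mark checked above allows at most
 * DRBD_MAX_BIO_SIZE/PAGE_SIZE vacant pool pages per configured minor;
 * chains beyond that are released to the system through page_chain_free()
 * instead of being re-linked into drbd_pp_pool. */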
/*
 * You need to hold the req_lock:
 *  _drbd_wait_ee_list_empty()
 *
 * You must not have the req_lock:
 *  drbd_process_done_ee()
 *  drbd_wait_ee_list_empty()
 */
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}
/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}
void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}
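/* Lock pairing at a glance (cf. the req_lock rules listed above):
 * _drbd_wait_ee_list_empty() is entered with req_lock held and drops and
 * re-takes it around the schedule; drbd_wait_ee_list_empty() is the
 * convenience wrapper that takes req_lock itself. */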
/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */
		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}
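/* Note on the contract: drbd_recv() either returns exactly 'size', or the
 * connection has already been forced to C_BROKEN_PIPE above. Callers such
 * as drbd_recv_header() therefore only compare the return value against
 * the expected length. */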
/* On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}
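/* Example: both drbd_try_connect() and drbd_wait_for_connect() below call
 * this with net_conf->sndbuf_size/rcvbuf_size before connect(2) resp.
 * listen(2); a configured size of 0 is taken to mean "leave the kernel
 * default (and auto-tuning) alone", hence the non-zero guards above. */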
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}
static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}
static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return 0;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return 1;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return 0;
	}
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}
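/* Connection scheme in brief: each side both connects out and listens. The
 * first socket established carries P_HAND_SHAKE_S and becomes the data
 * socket ("sock"); the second carries P_HAND_SHAKE_M and becomes the
 * meta-data socket ("msock"). When initial packets cross, the older socket
 * of that kind is released, and the side that accepted the msock sets
 * DISCARD_CONCURRENT, which later breaks ties for concurrent writes. */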
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return 0;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return 0;
	}
	mdev->last_received = jiffies;

	return 1;
}
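/* Header variants: h80 is the original header with a 16bit length field;
 * h95 (BE_DRBD_MAGIC_BIG, agreed_pro_version >= 95) widens the length to
 * 32bit, which is presumably why drbd_connect() above caps peers older
 * than protocol 95 at DRBD_MAX_SIZE_H80_PACKET. */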
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return 1;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return 1;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return 0;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return 1;
}
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct page *page;
	int dgs, ds, rr;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
			     rr, dgs);
			return NULL;
		}
	}

	data_size -= dgs;

	ERR_IF(data_size &  0x1ff) return NULL;
	ERR_IF(data_size >  DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!e)
		return NULL;

	ds = data_size;
	page = e->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		rr = drbd_recv(mdev, data, len);
		if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (rr != len) {
			drbd_free_ee(mdev, e);
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, len);
			return NULL;
		}
		ds -= rr;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return e;
}
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
{
	struct page *page;
	int rr, rv = 1;
	void *data;

	if (data_size == 0)
		return 1;

	page = drbd_pp_alloc(mdev, 1, 1);

	data = kmap(page);
	while (data_size) {
		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			rv = 0;
			dev_warn(DEV, "short read receiving data: read %d expected %d\n",
			     rr, min_t(int, data_size, PAGE_SIZE));
			break;
		}
		data_size -= rr;
	}
	kunmap(page);
	drbd_pp_free(mdev, page, 0);
	return rv;
}
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

	if (dgs) {
		rr = drbd_recv(mdev, dig_in, dgs);
		if (rr != dgs) {
			dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
			     rr, dgs);
			return 0;
		}
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			     kmap(bvec->bv_page)+bvec->bv_offset,
			     expect);
		kunmap(bvec->bv_page);
		if (rr != expect) {
			dev_warn(DEV, "short read receiving data reply: "
			     "read %d expected %d\n",
			     rr, expect);
			return 0;
		}
		data_size -= rr;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return 0;
		}
	}

	D_ASSERT(data_size == 0);
	return 1;
}
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok;

	D_ASSERT(hlist_unhashed(&e->colision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
	}
	dec_unacked(mdev);

	return ok;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!e)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
		return 1;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
fail:
	put_ldev(mdev);
	return 0;
}
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
		return 0;
	}

	/* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

	if (ok)
		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return ok;
}
static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	int ok;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return ok;
}
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;
	int ok = 1, pcmd;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->colision));
		hlist_del_init(&e->colision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->colision));
	}

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return ok;
}
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	int ok;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->colision));
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);

	dec_unacked(mdev);

	return ok;
}
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
{
	DEFINE_WAIT(wait);
	unsigned int p_seq;
	long timeout;
	int ret = 0;

	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
			break;
		}
	}
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
		mdev->peer_seq++;
	spin_unlock(&mdev->peer_seq_lock);
	return ret;
}
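/* Worked example of the wrap around (assuming seq_le() is the usual
 * serial-number compare, (s32)(a - b) <= 0): with peer_seq == 0xffffffff
 * and packet_seq == 1, seq_le(1, 0) fails and we wait; as soon as the
 * missing packet bumps peer_seq to 0, seq_le(1, 1) holds and the packet
 * is processed in order despite the 32bit overflow. */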
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	sector_t sector;
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;
	int rw = WRITE;
	u32 dp_flags;

	if (!get_ldev(mdev)) {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write mirrored data block "
			    "to local disk.\n");
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
			mdev->peer_seq++;
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);
	}

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);
	if (!e) {
		put_ldev(mdev);
		return 0;
	}

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
		DEFINE_WAIT(wait);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;
		int first;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with an other ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *	 queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *	 block the receiver, waiting on misc_wait
		 *	 until no more conflicting requests are there,
		 *	 or we get interrupted (disconnect).
		 *
		 *	 we do not just write after local io completion of those
		 *	 requests, but only after req is done completely, i.e.
		 *	 we wait for the P_DISCARD_ACK to arrive!
		 *
		 *	 then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));

#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
		first = 1;
		for (;;) {
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, colision) {
				if (OVERLAPS) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					if (first)
						dev_alert(DEV, "%s[%u] Concurrent local write detected!"
						      " new: %llus +%u; pending: %llus +%u\n",
						      current->comm, current->pid,
						      (unsigned long long)sector, size,
						      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)
						++have_unacked;
					++have_conflict;
				}
			}
#undef OVERLAPS
			if (!have_conflict)
				break;

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);
				inc_unacked(mdev);
				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				put_ldev(mdev);
				wake_asender(mdev);
				finish_wait(&mdev->misc_wait, &wait);
				return 1;
			}

			if (signal_pending(current)) {
				hlist_del_init(&e->colision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;
			}

			spin_unlock_irq(&mdev->req_lock);
			if (first) {
				first = 0;
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);
			}
			schedule();
			spin_lock_irq(&mdev->req_lock);
		}
		finish_wait(&mdev->misc_wait, &wait);
	}

	list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	switch (mdev->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, e);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);
	}

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
		return 1;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->colision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	/* yes, the epoch_size now is imbalanced.
	 * but we drop the connection anyways, so we don't have a chance to
	 * receive a barrier... atomic_inc(&mdev->epoch_size); */
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return 0;
}
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approximated. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
			throttle = 1;
	}
	return throttle;
}
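/* Worked example (assuming Bit2KB() converts 4 KiB bitmap bits to KiB):
 * say the disk did 10000 sectors of IO since the last check, of which
 * rs_sect_ev accounts for 9000; curr_events then advanced by 1000 > 64,
 * so the rate is re-evaluated. With marks 4 seconds apart and
 * db = 8192 bits resynced, dbdt = Bit2KB(8192/4) = 8192 KiB/s, which
 * with c_min_rate = 4000 would report "busy" and throttle the syncer. */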
static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
{
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	int size, verb;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return 0;
	}
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		verb = 1;
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
		case P_OV_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			verb = 0;
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possibly payload */
		return drbd_drain_block(mdev, digest_size);
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
	if (!e) {
		put_ldev(mdev);
		return 0;
	}

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		goto submit;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
		if (!di)
			goto out_free_e;

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->digest = di;
		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
			goto out_free_e;

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
		    cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
		goto out_free_e;
	}

	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while, anyways.
	 */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only.
	 */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
		return 1;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, e);
	return 0;
}
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, hg, rv = -100;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			self = drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (self != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, hg, rv = -100;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (self != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}
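/* Example output (format only), as emitted by the callers below:
 *
 *   self 0123456789ABCDEF:0000000000000000:FEDCBA9876543210:0011223344556677 bits:42 flags:2
 *
 * i.e. current, bitmap, and the two history UUIDs, followed by the bitmap
 * weight and the flags word handed in by the caller. */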
/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;

	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;
	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1001;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}
		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1001;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}
		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
			return dc ? -1 : 1;
		}
	}
	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
		peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
		if (self == peer) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1001;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
			return -1;
		}
	}
	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;
	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
		peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
		if (self == peer) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->agreed_pro_version < 91)
				return -1001;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Undid last start of resync:\n");

			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}
	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}
/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg == -1001) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n");
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		default:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
			     "assumption\n");
		}
	}

	if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}
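/* Summary of the hg -> connection state mapping implemented above:
 * hg > 0 leads to C_WF_BITMAP_S (we send our bitmap and become sync
 * source), hg < 0 leads to C_WF_BITMAP_T, and hg == 0 to plain
 * C_CONNECTED. |hg| >= 2 additionally sets all bits in the bitmap
 * first, turning the following resync into a full sync. */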
/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}
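/* Compatibility matrix, as enforced above (peer setting vs. our setting):
 *   discard-remote vs. discard-local  -> valid (complementary pair)
 *   discard-local  vs. discard-local  -> invalid (both sides would yield)
 *   discard-remote vs. discard-remote -> invalid (neither side would yield)
 *   anything else, equal on both sides   -> valid
 *   anything else, unequal on both sides -> invalid */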
static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_protocol *p = &mdev->data.rbuf.protocol;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose = cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &mdev->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &mdev->flags);

	if (p_proto != mdev->net_conf->wire_protocol) {
		dev_err(DEV, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
		dev_err(DEV, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
		dev_err(DEV, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
		dev_err(DEV, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && mdev->net_conf->want_lose) {
		dev_err(DEV, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != mdev->net_conf->two_primaries) {
		dev_err(DEV, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (mdev->agreed_pro_version >= 87) {
		unsigned char *my_alg = mdev->net_conf->integrity_alg;

		if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
			return FALSE;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		dev_info(DEV, "data-integrity-alg: %s\n",
			 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return TRUE;

disconnect:
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return FALSE;
}
/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
{
	int ok = TRUE;
	struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    packet_size, exp_max_sz);
		return FALSE;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
		data_size   = packet_size  - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
		return FALSE;

	mdev->sync_conf.rate	  = be32_to_cpu(p->rate);

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
				    "peer wants %u, accepting only %u byte\n",
						data_size, SHARED_SECRET_MAX);
				return FALSE;
			}

			if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
				return FALSE;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->sync_conf.csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94) {
			mdev->sync_conf.rate	  = be32_to_cpu(p->rate);
			mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kmalloc of fifo_buffer failed");
					goto disconnect;
				}
			}
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
			mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->verify_tfm);
			mdev->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
			mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->csums_tfm);
			mdev->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size   = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}

	return ok;

disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	return FALSE;
}
static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ */
}
/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}
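/* Example of the 12.5% rule above (numbers made up): a = 1000 sectors and
 * b = 880 sectors gives d = 120, which exceeds b>>3 = 110, so the warning
 * is printed. With b = 900, d = 100 exceeds neither 1000>>3 = 125 nor
 * 900>>3 = 112, so sizes that close together stay quiet. */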
static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_sizes *p = &mdev->data.rbuf.sizes;
	enum determine_dev_size dd = unchanged;
	unsigned int max_bio_size;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
		dev_err(DEV, "some backing storage is needed\n");
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
				 (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return FALSE;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determin_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return FALSE;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		if (mdev->agreed_pro_version < 94)
			max_bio_size = be32_to_cpu(p->max_bio_size);
		else if (mdev->agreed_pro_version == 94)
			max_bio_size = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			max_bio_size = DRBD_MAX_BIO_SIZE;

		if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9)
			drbd_setup_queue_param(mdev, max_bio_size);

		drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return TRUE;
}
static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_uuids *p = &mdev->data.rbuf.uuids;
	u64 *p_uuid;
	int i;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids");
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until a possibly
	   ongoing cluster wide state change is finished. That is important if
	   we are primary and are detaching from our disk. We need to see the
	   new disk state... */
	wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	return TRUE;
}
/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S]       = C_VERIFY_T,
		[C_MASK]   = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}
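/* Illustrative conversion: if the peer reports "role:Primary peer:Secondary
 * disk:UpToDate pdsk:Inconsistent", our mirrored view is "role:Secondary
 * peer:Primary disk:Inconsistent pdsk:UpToDate". Asymmetric connection
 * states are flipped via c_tab, e.g. a peer in C_STARTING_SYNC_S maps to
 * C_STARTING_SYNC_T from our point of view. */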
static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_req_state *p = &mdev->data.rbuf.req_state;
	union drbd_state mask, val;
	int rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
	    test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return TRUE;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);

	drbd_send_sr_reply(mdev, rv);
	drbd_md_sync(mdev);

	return TRUE;
}
static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_state *p = &mdev->data.rbuf.state;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->req_lock);
 retry:
	os = ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	/* peer says his disk is uptodate, while we think it is inconsistent,
	 * and this happens while we think we have a sync going on. */
	if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return TRUE;
		}
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen, if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.conn >= C_STARTING_SYNC_S &&
			peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
					return FALSE;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
				return FALSE;
			}
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.i != os.i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
		   for temporary network outages! */
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
		return FALSE;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS) {
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		return FALSE;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_state(mdev);
		}
	}

	mdev->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return TRUE;
}
static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return TRUE;
}
enum receive_bitmap_ret { OK, DONE, FAILED };

static enum receive_bitmap_ret
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
		     unsigned long *buffer, struct bm_xfer_ctx *c)
{
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);

	if (want != data_size) {
		dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
		return FAILED;
	}
	if (want == 0)
		return DONE;
	if (drbd_recv(mdev, buffer, want) != want)
		return FAILED;

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return OK;
}
static enum receive_bitmap_ret
recv_bm_rle_bits(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
	int toggle = DCBP_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return FAILED;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return FAILED;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return FAILED;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return FAILED;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return FAILED;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s == c->bm_bits) ? DONE : OK;
}
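/* Illustrative decode (made-up run lengths): with toggle starting at 0 and
 * the VLI stream decoding to runs of 5, 3 and 2, the loop above skips bits
 * s..s+4 (toggle 0), sets bits s+5..s+7 via _drbd_bm_set_bits() (toggle 1),
 * and skips s+8..s+9 again. Only the set runs touch the bitmap; the cleared
 * runs are encoded purely as distances. */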
static enum receive_bitmap_ret
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c)
{
	if (DCBP_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return FAILED;
}
void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header80) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}
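/* Example of the ratio math above (sizes made up): plain = 4000 bytes and
 * total = 500 compressed bytes gives r = 1000*500/4000 = 125, then
 * r = 1000 - 125 = 875, printed as "compression: 87.5%". The UINT_MAX
 * check only matters for bitmaps so large that 1000*total would wrap. */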
/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter if we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we would use big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct bm_xfer_ctx c;
	void *buffer;
	enum receive_bitmap_ret ret;
	int ok = FALSE;
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	/* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	buffer	 = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		if (cmd == P_BITMAP) {
			ret = receive_bitmap_plain(mdev, data_size, buffer, &c);
		} else if (cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buff */
			p = buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
				goto out;
			if (data_size <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
				goto out;
			}
			ret = decode_bitmap_c(mdev, p, &c);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
			goto out;
		}

		c.packets[cmd == P_BITMAP]++;
		c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;

		if (ret != OK)
			break;

		if (!drbd_recv_header(mdev, &cmd, &data_size))
			goto out;
	} while (ret == OK);
	if (ret == FAILED)
		goto out;

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(ok == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}

	ok = TRUE;
 out:
	/* drbd_bm_unlock(mdev); by intention no lock */
	if (ok && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	free_page((unsigned long) buffer);
	return ok;
}
static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
		 cmd, data_size);

	size = data_size;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(mdev, sink, want);
		ERR_IF(r <= 0) break;
		size -= r;
	}
	return size == 0;
}
static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->data.socket);

	return TRUE;
}
static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	struct p_block_desc *p = &mdev->data.rbuf.block_desc;

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return TRUE;
}
typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);

struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	drbd_cmd_handler_f function;
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply },
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier },
	[P_BITMAP]	    = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
	[P_UNPLUG_REMOTE]   = { 0, sizeof(struct p_header80), receive_UnplugRemote },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_SYNC_PARAM]	    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_SYNC_PARAM89]    = { 1, sizeof(struct p_header80), receive_SyncParam },
	[P_PROTOCOL]	    = { 1, sizeof(struct p_protocol), receive_protocol },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
	[P_SYNC_UUID]	    = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
	[P_OV_REQUEST]	    = { 0, sizeof(struct p_block_req), receive_DataRequest },
	[P_OV_REPLY]	    = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
	[P_DELAY_PROBE]	    = { 0, sizeof(struct p_delay_probe93), receive_skip },
	[P_OUT_OF_SYNC]	    = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
	/* anything missing from this table is in
	 * the asender_tbl, see get_asender_cmd */
	[P_MAX_CMD]	    = { 0, 0, NULL },
};
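/* Reading the table above: the first field says whether a payload beyond
 * the sub-header is acceptable, the second is the expected packet size.
 * E.g. P_DATA carries the actual block payload (flag 1), while
 * P_UNPLUG_REMOTE is a bare p_header80 and any trailing payload makes
 * drbdd() below treat the packet as a protocol error. */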
/* All handler functions that expect a sub-header get that sub-header in
   mdev->data.rbuf.header.head.payload.

   Usually in mdev->data.rbuf.header.head the callback can find the usual
   p_header, but they may not rely on that. Since there is also p_header95 !
 */
static void drbdd(struct drbd_conf *mdev)
{
	union p_header *header = &mdev->data.rbuf.header;
	unsigned int packet_size;
	enum drbd_packets cmd;
	size_t shs; /* sub header size */
	int rv;

	while (get_t_state(&mdev->receiver) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (!drbd_recv_header(mdev, &cmd, &packet_size))
			goto err_out;

		if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
			dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
			goto err_out;
		}

		shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
		if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
			dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
			goto err_out;
		}

		if (shs) {
			rv = drbd_recv(mdev, &header->h80.payload, shs);
			if (unlikely(rv != shs)) {
				dev_err(DEV, "short read while reading sub header: rv=%d\n", rv);
				goto err_out;
			}
		}

		rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);

		if (unlikely(!rv)) {
			dev_err(DEV, "error receiving %s, l: %d!\n",
			    cmdname(cmd), packet_size);
			goto err_out;
		}
	}

	if (0) {
	err_out:
		drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	}
	/* If we leave here, we probably want to update at least the
	 * "Connected" indicator on stable storage. Do so explicitly here. */
	drbd_md_sync(mdev);
}
void drbd_flush_workqueue(struct drbd_conf *mdev)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	init_completion(&barr.done);
	drbd_queue_work(&mdev->data.work, &barr.w);
	wait_for_completion(&barr.done);
}
void drbd_free_tl_hash(struct drbd_conf *mdev)
{
	struct hlist_head *h;

	spin_lock_irq(&mdev->req_lock);

	if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
		spin_unlock_irq(&mdev->req_lock);
		return;
	}
	/* paranoia code */
	for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
				(int)(h - mdev->ee_hash), h->first);
	kfree(mdev->ee_hash);
	mdev->ee_hash = NULL;
	mdev->ee_hash_s = 0;

	/* paranoia code */
	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
		if (h->first)
			dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
				(int)(h - mdev->tl_hash), h->first);
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
	mdev->tl_hash_s = 0;
	spin_unlock_irq(&mdev->req_lock);
}
static void drbd_disconnect(struct drbd_conf *mdev)
{
	enum drbd_fencing_p fp;
	union drbd_state os, ns;
	int rv = SS_UNKNOWN_ERROR;
	unsigned int i;

	if (mdev->state.conn == C_STANDALONE)
		return;
	if (mdev->state.conn >= C_WF_CONNECTION)
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
			drbd_conn_str(mdev->state.conn));

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&mdev->asender);
	drbd_free_sock(mdev);

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	/* make sure syncer is stopped and w_resume_next_sg queued */
	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee(). If we do this too early, we might
	 * miss some resync ee and pages.*/
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev);

	dev_info(DEV, "Connection closed\n");

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
		drbd_try_outdate_peer_async(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	if (os.conn >= C_UNCONNECTED) {
		/* Do not restart in case we are C_DISCONNECTING */
		ns = os;
		ns.conn = C_UNCONNECTED;
		rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	if (os.conn == C_DISCONNECTING) {
		wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);

		crypto_free_hash(mdev->cram_hmac_tfm);
		mdev->cram_hmac_tfm = NULL;

		kfree(mdev->net_conf);
		mdev->net_conf = NULL;
		drbd_request_state(mdev, NS(conn, C_STANDALONE));
	}

	/* tcp_close and release of sendpage pages can be deferred. I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));
}
/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.sbuf.handshake;
	int ok;

	if (mutex_lock_interruptible(&mdev->data.mutex)) {
		dev_err(DEV, "interrupted during initial handshake\n");
		return 0; /* interrupted. not ok. */
	}

	if (mdev->data.socket == NULL) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
			    (struct p_header80 *)p, sizeof(*p), 0);
	mutex_unlock(&mdev->data.mutex);
	return ok;
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_conf *mdev)
{
	/* ASSERT current == mdev->receiver ... */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	unsigned int length;
	enum drbd_packets cmd;
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		return 0;

	if (cmd != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
		     cmdname(cmd), cmd);
		return -1;
	}

	if (length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
		     expect, length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
	    PRO_VERSION_MIN, PRO_VERSION_MAX,
	    p->protocol_min, p->protocol_max);
	return -1;
}
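/* Version negotiation example (numbers made up): if we support protocols
 * 86..95 and the peer announces min 87, max 91, the ranges overlap and
 * agreed_pro_version = min(95, 91) = 91, so both sides speak 91. A peer
 * announcing max 85 would fail the overlap check above and we go
 * standalone. */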
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/

static int drbd_do_auth(struct drbd_conf *mdev)
{
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len = strlen(mdev->net_conf->shared_secret);
	unsigned int resp_size;
	struct hash_desc desc;
	enum drbd_packets cmd;
	unsigned int length;
	int rv;

	desc.tfm = mdev->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
				(u8 *)mdev->net_conf->shared_secret, key_len);
	if (rv) {
		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_CHALLENGE) {
		dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
		    cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length > CHALLENGE_LEN * 2) {
		dev_err(DEV, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(length, GFP_NOIO);
	if (peers_ch == NULL) {
		dev_err(DEV, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	rv = drbd_recv(mdev, peers_ch, length);

	if (rv != length) {
		dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		dev_err(DEV, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, length);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
	if (!rv)
		goto fail;

	rv = drbd_recv_header(mdev, &cmd, &length);
	if (!rv)
		goto fail;

	if (cmd != P_AUTH_RESPONSE) {
		dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
			cmdname(cmd), cmd);
		rv = 0;
		goto fail;
	}

	if (length != resp_size) {
		dev_err(DEV, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	rv = drbd_recv(mdev, response, resp_size);

	if (rv != resp_size) {
		dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		dev_err(DEV, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
		     resp_size, mdev->net_conf->cram_hmac_alg);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
*thi
)
4102 struct drbd_conf
*mdev
= thi
->mdev
;
4103 unsigned int minor
= mdev_to_minor(mdev
);
4106 sprintf(current
->comm
, "drbd%d_receiver", minor
);
4108 dev_info(DEV
, "receiver (re)started\n");
4111 h
= drbd_connect(mdev
);
4113 drbd_disconnect(mdev
);
4114 __set_current_state(TASK_INTERRUPTIBLE
);
4115 schedule_timeout(HZ
);
4118 dev_warn(DEV
, "Discarding network configuration.\n");
4119 drbd_force_state(mdev
, NS(conn
, C_DISCONNECTING
));
4124 if (get_net_conf(mdev
)) {
4130 drbd_disconnect(mdev
);
4132 dev_info(DEV
, "receiver terminated\n");
/* ********* acknowledge sender ******** */

static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_req_state_reply *p = (struct p_req_state_reply *)h;

	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return TRUE;
}
static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
{
	return drbd_send_ping_ack(mdev);
}
static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return TRUE;
}
static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return TRUE;
}
/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
		(void *)(unsigned long)id, (unsigned long long)sector);
	return NULL;
}
typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);

static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
		return FALSE;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return TRUE;
}
static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return TRUE;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return FALSE;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, what);
}
static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	if (__ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "Got NegAck packet. Peer is in troubles?\n");

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		int size = be32_to_cpu(p->blksize);
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}
	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__, neg_acked);
}
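
/* Negative reply to a data (read) request: the peer could not serve the
 * read, so the original application request has to fail. */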
static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__, neg_acked);
}
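
/* Negative reply to a resync read request: account the extent as failed,
 * provided we can still grab a reference on the (possibly failed) local
 * disk. */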
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_rs_failed_io(mdev, sector, size);
		put_ldev(mdev);
	}

	return true;
}
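
/* A barrier ack confirms the peer has written everything up to the given
 * epoch.  If we were running ahead of the peer (C_AHEAD) and no
 * application requests are in flight anymore, queue w_start_resync to
 * bring the peer back in sync. */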
static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    list_empty(&mdev->start_resync_work.list)) {
		struct drbd_work *w = &mdev->start_resync_work;
		w->cb = w_start_resync;
		drbd_queue_work_front(&mdev->data.work, w);
	}

	return true;
}
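
/* One online-verify result: a block_id of ID_OUT_OF_SYNC flags a mismatch
 * found by the peer.  Progress marks advance every other megabyte, and
 * when ov_left reaches zero the verify is finished, via a queued work
 * item if we can allocate one, synchronously otherwise. */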
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	/* packet consumed and deliberately ignored */
	return true;
}
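
/* Dispatch table for packets arriving on the meta socket.  It is indexed
 * by command number; pkt_size is the total on-wire size including the
 * p_header80, which the receive loop uses to know how many more bytes to
 * read before calling ->process(). */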
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};

static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
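
/* The asender ("ack sender") thread owns the meta socket.  It alternates
 * between sending whatever is due (pings, acks for completed epoch
 * entries via drbd_process_done_ee) and receiving peer packets in two
 * steps: first a bare p_header80, then, once the command is known, the
 * remainder of the packet as announced by get_asender_cmd(). */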
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */
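
	/* Receive state: "buf" walks the reassembly buffer h, "received"
	 * counts the bytes gathered so far, and "expect" is the current
	 * target, a bare header at first, the full packet size once the
	 * command is known. */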
	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);
		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv  < expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}
		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}
	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}