   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_handshake(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(int vnr, void *p, void *data);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
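/* A minimal usage sketch (assuming the page_chain_for_each() helper from
 * drbd_int.h, which is also used further down in this file): walking such
 * a chain visits every page until the 0 that page_chain_del() stores as
 * end-of-list marker:
 *
 *	struct page *p = chain_head;
 *	page_chain_for_each(p) {
 *		// the successor of p is (struct page *)page_private(p)
 *	}
 */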
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp = NULL;
	int i = 0;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (++i == n)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}
static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}
static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first not finished we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_ee_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
	}
}
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);
}
/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}
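/* Usage sketch (hypothetical caller, for illustration only): a receiver
 * that needs a 16 KiB buffer on a 4 KiB-page system allocates a four-page
 * chain and eventually hands it back to the pool via drbd_pp_free(),
 * defined just below:
 *
 *	struct page *chain = drbd_pp_alloc(mdev, 4, true);
 *	if (chain) {
 *		// receive into the chain, pages linked via page->private
 *		drbd_pp_free(mdev, chain, 0);
 *	}
 */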
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_process_done_ee()
 drbd_wait_ee_list_empty()
*/
struct drbd_peer_request *
drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
	      unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_peer_request *peer_req;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!peer_req) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	peer_req->flags = 0;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	return peer_req;

 fail:
	mempool_free(peer_req, drbd_ee_mempool);
	return NULL;
}
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
		       int is_net)
{
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_pp_free(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
}
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, peer_req, is_net);
		count++;
	}
	return count;
}
/* See also comments in _req_mod(,BARRIER_ACKED)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;
	int err = 0;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_write.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		int err2;

		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		if (!err)
			err = err2;
		drbd_free_ee(mdev, peer_req);
	}
	wake_up(&mdev->ee_wait);

	return err;
}
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
	}
}
void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
}
/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(const char **what, struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops  = sock->ops;

out:
	return err;
}
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}
static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	if (rv < 0) {
		/* ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal */
		if (rv == -ECONNRESET)
			conn_info(tconn, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		conn_info(tconn, "sock was shut down by peer\n");
	}

	if (rv != size) {
		/* signal came in, or peer/link went down,
		 * after we read a partial message */
		/* D_ASSERT(signal_pending(current)); */
		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
	}

	return rv;
}
static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv(tconn, buf, size);
	if (err != size) {
		if (err >= 0)
			err = -EIO;
	} else
		err = 0;
	return err;
}
static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
{
	int err;

	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
	return err;
}
/*
 * On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = tconn->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, tconn->net_conf->my_addr,
	       min_t(int, tconn->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)tconn->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)tconn->net_conf->peer_addr,
				 tconn->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			conn_err(tconn, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	put_net_conf(tconn);
	return sock;
}
static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)tconn->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = tconn->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, tconn->net_conf->sndbuf_size,
			tconn->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
				  (struct sockaddr *) tconn->net_conf->my_addr,
				  tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(&what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "%s failed, err = %d\n", what, err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	}
	put_net_conf(tconn);

	return s_estab;
}
static int drbd_send_fp(struct drbd_tconn *tconn, struct socket *sock, enum drbd_packet cmd)
{
	struct p_header *h = &tconn->data.sbuf.header;

	return !_conn_send_cmd(tconn, 0, sock, cmd, h, sizeof(*h), 0);
}
static enum drbd_packet drbd_recv_fp(struct drbd_tconn *tconn, struct socket *sock)
{
	struct p_header80 *h = tconn->data.rbuf;
	int rr;

	rr = drbd_recv_short(sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);

	return 0xffff;
}
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}
/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(int vnr, void *p, void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)p;
	int err;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
	if (!err)
		err = drbd_send_sizes(mdev, 0, 0);
	if (!err)
		err = drbd_send_uuids(mdev);
	if (!err)
		err = drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return err;
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_tconn *tconn)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &tconn->flags);

	/* Assume that the peer only understands protocol 80 until we know better. */
	tconn->agreed_pro_version = 80;

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(tconn);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(tconn, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(tconn, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				conn_err(tconn, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(&sock);
			ok = drbd_socket_okay(&msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(tconn);
		if (s) {
			try = drbd_recv_fp(tconn, s);
			drbd_socket_okay(&sock);
			drbd_socket_okay(&msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &tconn->flags);
				break;
			default:
				conn_warn(tconn, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(&sock);
			ok = drbd_socket_okay(&msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = tconn->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	tconn->data.socket = sock;
	tconn->meta.socket = msock;
	tconn->last_received = jiffies;

	h = drbd_do_handshake(tconn);
	if (h <= 0)
		return h;

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
		case -1:
			conn_err(tconn, "Authentication of peer failed\n");
			return -1;
		case 0:
			conn_err(tconn, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	drbd_thread_start(&tconn->asender);

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
		return -1;

	return !idr_for_each(&tconn->volumes, drbd_connected, tconn);

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}
static int decode_header(struct drbd_tconn *tconn, struct p_header *h, struct packet_info *pi)
{
	if (h->h80.magic == cpu_to_be32(DRBD_MAGIC)) {
		pi->cmd = be16_to_cpu(h->h80.command);
		pi->size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		pi->cmd = be16_to_cpu(h->h95.command);
		pi->size = be32_to_cpu(h->h95.length) & 0x00ffffff;
	} else {
		conn_err(tconn, "magic?? on data m: 0x%08x c: %d l: %d\n",
			 be32_to_cpu(h->h80.magic),
			 be16_to_cpu(h->h80.command),
			 be16_to_cpu(h->h80.length));
		return -EINVAL;
	}
	return 0;
}
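/* For illustration: the 8-byte h80 header is <u32 magic, u16 command,
 * u16 length>, while h95 uses <u16 magic, u16 command, u32 length> and
 * therefore masks the length to its lower 24 bits above.  A 4096-byte
 * P_DATA payload decodes to the same pi->cmd and pi->size == 4096
 * regardless of which header variant carried it on the wire. */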
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_header *h = tconn->data.rbuf;
	int err;

	err = drbd_recv_all_warn(tconn, h, sizeof(*h));
	if (err)
		return err;

	err = decode_header(tconn, h, pi);
	tconn->last_received = jiffies;

	return err;
}
static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}
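/* Rough event-flow sketch: every write in an epoch does EV_PUT when its
 * local write completes (see e_end_block), and receive_Barrier() feeds
 * EV_GOT_BARRIER_NR once the barrier number is known.  Only when
 * epoch_size != 0, active == 0 and DE_HAVE_BARRIER_NUMBER is set does
 * the epoch finish: the P_BARRIER_ACK goes out, and the epoch is either
 * freed (FE_DESTROYED) or reused in place (FE_RECYCLED). */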
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
				err = -ENOSPC;
				goto fail;
			}
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&peer_req->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return err;
}
static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
{
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
	if (i->waiting)
		wake_up(&mdev->misc_wait);
}
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packet cmd,
			   unsigned int data_size)
{
	int rv;
	struct p_barrier *p = mdev->tconn->data.rbuf;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
	case WO_none:
		if (rv == FE_RECYCLED)
			return 0;

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		if (epoch)
			break;
		else
			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
			/* Fall through */

	case WO_bdev_flush:
	case WO_drain_io:
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		drbd_flush(mdev);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			if (epoch)
				break;
		}

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		return 0;
	default:
		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
		return -EIO;
	}

	epoch->flags = 0;
	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
		mdev->epochs++;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
		kfree(epoch);
	}
	spin_unlock(&mdev->epoch_lock);

	return 0;
}
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
{
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;
	struct page *page;
	int dgs, ds, err;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;

	if (dgs) {
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
		if (err)
			return NULL;
	}

	data_size -= dgs;

	if (!expect(data_size != 0))
		return NULL;
	if (!expect(IS_ALIGNED(data_size, 512)))
		return NULL;
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
		return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);
		return NULL;
	}

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
	if (!peer_req)
		return NULL;

	ds = data_size;
	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		data = kmap(page);
		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		kunmap(page);
		if (err) {
			drbd_free_ee(mdev, peer_req);
			return NULL;
		}
		ds -= len;
	}

	if (dgs) {
		drbd_csum_ee(mdev, mdev->tconn->integrity_r_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_ee(mdev, peer_req);
			return NULL;
		}
	}
	mdev->recv_cnt += data_size>>9;
	return peer_req;
}
1388 * out of the socket input buffer, and discards it.
1390 static int drbd_drain_block(struct drbd_conf
*mdev
, int data_size
)
1399 page
= drbd_pp_alloc(mdev
, 1, 1);
1403 unsigned int len
= min_t(int, data_size
, PAGE_SIZE
);
1405 err
= drbd_recv_all_warn(mdev
->tconn
, data
, len
);
1411 drbd_pp_free(mdev
, page
, 0);
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
{
	struct bio_vec *bvec;
	struct bio *bio;
	int dgs, err, i, expect;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;

	dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->tconn->integrity_r_tfm) : 0;

	if (dgs) {
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
		if (err)
			return err;
	}

	data_size -= dgs;

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
		expect = min_t(int, data_size, bvec->bv_len);
		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
		kunmap(bvec->bv_page);
		if (err)
			return err;
		data_size -= expect;
	}

	if (dgs) {
		drbd_csum_bio(mdev, mdev->tconn->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
			return -EINVAL;
		}
	}

	D_ASSERT(data_size == 0);
	return 0;
}
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_work *w, int unused)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;
	int err;

	D_ASSERT(drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, peer_req->i.size);

		err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
	}
	dec_unacked(mdev);

	return err;
}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
	if (!peer_req)
		goto fail;

	dec_rs_pending(mdev);

	inc_unacked(mdev);
	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	peer_req->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
		return 0;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_ee(mdev, peer_req);
fail:
	put_ldev(mdev);
	return -EIO;
}
static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
{
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
		return req;
	if (!missing_ok) {
		dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
	}
	return NULL;
}
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int data_size)
{
	struct drbd_request *req;
	sector_t sector;
	int err;
	struct p_data *p = mdev->tconn->data.rbuf;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (unlikely(!req))
		return -EIO;

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(mdev, req, sector, data_size);
	if (!err)
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

	return err;
}
static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packet cmd,
			       unsigned int data_size)
{
	sector_t sector;
	int err;
	struct p_data *p = mdev->tconn->data.rbuf;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		err = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
	}

	atomic_add(data_size >> 9, &mdev->rs_sect_in);

	return err;
}
static int w_restart_write(struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	struct drbd_conf *mdev = w->mdev;
	struct bio *bio;
	unsigned long start_time;
	unsigned long flags;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	if (!expect(req->rq_state & RQ_POSTPONED)) {
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
		return -EIO;
	}
	bio = req->master_bio;
	start_time = req->start_time;
	/* Postponed requests will not have their master_bio completed! */
	__req_mod(req, DISCARD_WRITE, NULL);
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	while (__drbd_make_request(mdev, bio, start_time))
		/* retry */ ;
	return 0;
}
static void restart_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
{
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
			continue;
		if (expect(list_empty(&req->w.list))) {
			req->w.mdev = mdev;
			req->w.cb = w_restart_write;
			drbd_queue_work(&mdev->tconn->data.work, &req->w);
		}
	}
}
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_work *w, int cancel)
{
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;
	int err = 0, pcmd;

	if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			err = drbd_send_ack(mdev, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, peer_req->i.size);
		} else {
			err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
		dec_unacked(mdev);
	}
	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->tconn->net_conf->two_primaries) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(mdev, peer_req);
		if (peer_req->flags & EE_RESTART_REQUESTS)
			restart_conflicting_writes(mdev, sector, peer_req->i.size);
		spin_unlock_irq(&mdev->tconn->req_lock);
	} else
		D_ASSERT(drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

	return err;
}
static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
{
	struct drbd_conf *mdev = w->mdev;
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	int err;

	err = drbd_send_ack(mdev, ack, peer_req);
	dec_unacked(mdev);

	return err;
}
static int e_send_discard_write(struct drbd_work *w, int unused)
{
	return e_send_ack(w, P_DISCARD_WRITE);
}
static int e_send_retry_write(struct drbd_work *w, int unused)
{
	struct drbd_tconn *tconn = w->mdev->tconn;

	return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
			     P_RETRY_WRITE : P_DISCARD_WRITE);
}
static bool seq_greater(u32 a, u32 b)
{
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 *  a <<= 8; b <<= 8;
	 */
	return (s32)a - (s32)b > 0;
}
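/* Worked example: for a == 5 and b == 0xfffffffe (b wrapped shortly
 * before a), (s32)a - (s32)b == 5 - (-2) == 7 > 0, so seq_greater()
 * correctly treats a as newer although a < b numerically. */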
static u32 seq_max(u32 a, u32 b)
{
	return seq_greater(a, b) ? a : b;
}
static bool need_peer_seq(struct drbd_conf *mdev)
{
	struct drbd_tconn *tconn = mdev->tconn;

	/*
	 * We only need to keep track of the last packet_seq number of our peer
	 * if we are in dual-primary mode and we have the discard flag set; see
	 * handle_write_conflicts().
	 */
	return tconn->net_conf->two_primaries &&
	       test_bit(DISCARD_CONCURRENT, &tconn->flags);
}
static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
{
	unsigned int newest_peer_seq;

	if (need_peer_seq(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
		mdev->peer_seq = newest_peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		/* wake up only if we actually changed mdev->peer_seq */
		if (peer_seq == newest_peer_seq)
			wake_up(&mdev->seq_wait);
	}
}
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
{
	DEFINE_WAIT(wait);
	long timeout;
	int ret;

	if (!need_peer_seq(mdev))
		return 0;

	spin_lock(&mdev->peer_seq_lock);
	for (;;) {
		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
			ret = 0;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock(&mdev->peer_seq_lock);
		timeout = mdev->tconn->net_conf->ping_timeo*HZ/10;
		timeout = schedule_timeout(timeout);
		spin_lock(&mdev->peer_seq_lock);
		if (!timeout) {
			ret = -ETIMEDOUT;
			dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
			break;
		}
	}
	spin_unlock(&mdev->peer_seq_lock);
	finish_wait(&mdev->seq_wait, &wait);
	return ret;
}
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
}
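/* Mapping example (illustrative): a peer write submitted with
 * REQ_SYNC|REQ_FUA arrives as dpf == (DP_RW_SYNC | DP_FUA) and maps back
 * to (REQ_SYNC | REQ_FUA) here; a plain asynchronous write carries no
 * flags and maps to 0. */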
static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
				    unsigned int size)
{
	struct drbd_interval *i;

    repeat:
	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		struct drbd_request *req;
		struct bio_and_error m;

		if (!i->local)
			continue;
		req = container_of(i, struct drbd_request, i);
		if (!(req->rq_state & RQ_POSTPONED))
			continue;
		req->rq_state &= ~RQ_POSTPONED;
		__req_mod(req, NEG_ACKED, &m);
		spin_unlock_irq(&mdev->tconn->req_lock);
		if (m.bio)
			complete_master_bio(mdev, &m);
		spin_lock_irq(&mdev->tconn->req_lock);
		goto repeat;
	}
}
static int handle_write_conflicts(struct drbd_conf *mdev,
				  struct drbd_peer_request *peer_req)
{
	struct drbd_tconn *tconn = mdev->tconn;
	bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
	sector_t sector = peer_req->i.sector;
	const unsigned int size = peer_req->i.size;
	struct drbd_interval *i;
	bool equal;
	int err;

	/*
	 * Inserting the peer request into the write_requests tree will prevent
	 * new conflicting local requests from being added.
	 */
	drbd_insert_interval(&mdev->write_requests, &peer_req->i);

    repeat:
	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (i == &peer_req->i)
			continue;

		if (!i->local) {
			/*
			 * Our peer has sent a conflicting remote request; this
			 * should not happen in a two-node setup.  Wait for the
			 * earlier peer request to complete.
			 */
			err = drbd_wait_misc(mdev, i);
			if (err)
				goto out;
			goto repeat;
		}

		equal = i->sector == sector && i->size == size;
		if (resolve_conflicts) {
			/*
			 * If the peer request is fully contained within the
			 * overlapping request, it can be discarded; otherwise,
			 * it will be retried once all overlapping requests
			 * have completed.
			 */
			bool discard = i->sector <= sector && i->sector +
				       (i->size >> 9) >= sector + (size >> 9);

			if (!equal)
				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u, "
					       "assuming %s came first\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size,
					  discard ? "local" : "remote");

			inc_unacked(mdev);
			peer_req->w.cb = discard ? e_send_discard_write :
						   e_send_retry_write;
			list_add_tail(&peer_req->w.list, &mdev->done_ee);
			wake_asender(mdev->tconn);

			err = -ENOENT;
			goto out;
		} else {
			struct drbd_request *req =
				container_of(i, struct drbd_request, i);

			if (!equal)
				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size);

			if (req->rq_state & RQ_LOCAL_PENDING ||
			    !(req->rq_state & RQ_POSTPONED)) {
				/*
				 * Wait for the node with the discard flag to
				 * decide if this request will be discarded or
				 * retried.  Requests that are discarded will
				 * disappear from the write_requests tree.
				 *
				 * In addition, wait for the conflicting
				 * request to finish locally before submitting
				 * the conflicting peer request.
				 */
				err = drbd_wait_misc(mdev, &req->i);
				if (err) {
					_conn_request_state(mdev->tconn,
							    NS(conn, C_TIMEOUT),
							    CS_HARD);
					fail_postponed_requests(mdev, sector, size);
					goto out;
				}
				goto repeat;
			}
			/*
			 * Remember to restart the conflicting requests after
			 * the new peer request has completed.
			 */
			peer_req->flags |= EE_RESTART_REQUESTS;
		}
	}
	err = 0;

    out:
	if (err)
		drbd_remove_epoch_entry_interval(mdev, peer_req);
	return err;
}
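/* Containment example for the discard rule above: a local write at
 * sector 8, 8192 byte (16 sectors, i.e. [8, 24)) fully covers a peer
 * write at sector 12, 2048 byte ([12, 16)), since 8 <= 12 and
 * 8 + 16 >= 12 + 4, so the peer request would be discarded; any merely
 * partial overlap leads to a retry instead. */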
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packet cmd,
			unsigned int data_size)
{
	sector_t sector;
	struct drbd_peer_request *peer_req;
	struct p_data *p = mdev->tconn->data.rbuf;
	u32 peer_seq = be32_to_cpu(p->seq_num);
	int rw = WRITE;
	u32 dp_flags;
	int err;

	if (!get_ldev(mdev)) {
		int err2;

		err = wait_for_and_update_peer_seq(mdev, peer_seq);
		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		err2 = drbd_drain_block(mdev, data_size);
		if (!err)
			err = err2;
		return err;
	}

	/*
	 * Corresponding put_ldev done either below (on various errors), or in
	 * drbd_peer_request_endio, if we successfully submit the data at the
	 * end of this function.
	 */

	sector = be64_to_cpu(p->sector);
	peer_req = read_in_block(mdev, p->block_id, sector, data_size);
	if (!peer_req) {
		put_ldev(mdev);
		return -EIO;
	}

	peer_req->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		peer_req->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	peer_req->epoch = mdev->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
	spin_unlock(&mdev->epoch_lock);

	if (mdev->tconn->net_conf->two_primaries) {
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
		if (err)
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
		err = handle_write_conflicts(mdev, peer_req);
		if (err) {
			spin_unlock_irq(&mdev->tconn->req_lock);
			if (err == -ENOENT) {
				put_ldev(mdev);
				return 0;
			}
			goto out_interrupted;
		}
	} else
		spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	switch (mdev->tconn->net_conf->wire_protocol) {
	case DRBD_PROT_C:
		inc_unacked(mdev);
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */
		break;
	case DRBD_PROT_B:
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, peer_req);
		break;
	case DRBD_PROT_A:
		/* nothing to do */
		break;
	}

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, peer_req->i.sector);
	}

	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
	if (!err)
		return 0;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(mdev, peer_req);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, peer_req->i.sector);

out_interrupted:
	drbd_may_finish_epoch(mdev, peer_req->epoch, EV_PUT + EV_CLEANUP);
	put_ldev(mdev);
	drbd_free_ee(mdev, peer_req);
	return err;
}
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
{
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	int curr_events;
	int throttle = 0;

	/* feature disabled? */
	if (mdev->ldev->dc.c_min_rate == 0)
		return 0;

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
			return 0;
		}
		/* Do not slow down if app IO is already waiting for this extent */
	}
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
			atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;
		int i;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		if (!dt)
			dt++;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->ldev->dc.c_min_rate)
			throttle = 1;
	}
	return throttle;
}
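/* Worked numbers (illustrative only): with the two marks 3 seconds
 * apart and 30000 resync bits cleared in between, dt == 3, db == 30000,
 * and at 4 KiB per bitmap bit dbdt == Bit2KB(10000) == 40000 KiB/s.
 * If c_min_rate were configured to, e.g., 4000 KiB/s, the current rate
 * is above it, so throttle is set while the disk looks busy.
 * (The exact Bit2KB factor depends on BM_BLOCK_SIZE.) */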
2141 static int receive_DataRequest(struct drbd_conf
*mdev
, enum drbd_packet cmd
,
2142 unsigned int digest_size
)
2145 const sector_t capacity
= drbd_get_capacity(mdev
->this_bdev
);
2146 struct drbd_peer_request
*peer_req
;
2147 struct digest_info
*di
= NULL
;
2149 unsigned int fault_type
;
2150 struct p_block_req
*p
= mdev
->tconn
->data
.rbuf
;
2152 sector
= be64_to_cpu(p
->sector
);
2153 size
= be32_to_cpu(p
->blksize
);
2155 if (size
<= 0 || !IS_ALIGNED(size
, 512) || size
> DRBD_MAX_BIO_SIZE
) {
2156 dev_err(DEV
, "%s:%d: sector: %llus, size: %u\n", __FILE__
, __LINE__
,
2157 (unsigned long long)sector
, size
);
2160 if (sector
+ (size
>>9) > capacity
) {
2161 dev_err(DEV
, "%s:%d: sector: %llus, size: %u\n", __FILE__
, __LINE__
,
2162 (unsigned long long)sector
, size
);
2166 if (!get_ldev_if_state(mdev
, D_UP_TO_DATE
)) {
2169 case P_DATA_REQUEST
:
2170 drbd_send_ack_rp(mdev
, P_NEG_DREPLY
, p
);
2172 case P_RS_DATA_REQUEST
:
2173 case P_CSUM_RS_REQUEST
:
2175 drbd_send_ack_rp(mdev
, P_NEG_RS_DREPLY
, p
);
2179 dec_rs_pending(mdev
);
2180 drbd_send_ack_ex(mdev
, P_OV_RESULT
, sector
, size
, ID_IN_SYNC
);
2183 dev_err(DEV
, "unexpected command (%s) in receive_DataRequest\n",
2186 if (verb
&& __ratelimit(&drbd_ratelimit_state
))
2187 dev_err(DEV
, "Can not satisfy peer's read request, "
2188 "no local data.\n");
2190 /* drain possibly payload */
2191 return drbd_drain_block(mdev
, digest_size
);
2194 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2195 * "criss-cross" setup, that might cause write-out on some other DRBD,
2196 * which in turn might block on the other node at this very place. */
2197 peer_req
= drbd_alloc_ee(mdev
, p
->block_id
, sector
, size
, GFP_NOIO
);
2204 case P_DATA_REQUEST
:
2205 peer_req
->w
.cb
= w_e_end_data_req
;
2206 fault_type
= DRBD_FAULT_DT_RD
;
2207 /* application IO, don't drbd_rs_begin_io */
2210 case P_RS_DATA_REQUEST
:
2211 peer_req
->w
.cb
= w_e_end_rsdata_req
;
2212 fault_type
= DRBD_FAULT_RS_RD
;
2213 /* used in the sector offset progress display */
2214 mdev
->bm_resync_fo
= BM_SECT_TO_BIT(sector
);
2218 case P_CSUM_RS_REQUEST
:
2219 fault_type
= DRBD_FAULT_RS_RD
;
2220 di
= kmalloc(sizeof(*di
) + digest_size
, GFP_NOIO
);
2224 di
->digest_size
= digest_size
;
2225 di
->digest
= (((char *)di
)+sizeof(struct digest_info
));
2227 peer_req
->digest
= di
;
2228 peer_req
->flags
|= EE_HAS_DIGEST
;
2230 if (drbd_recv(mdev
->tconn
, di
->digest
, digest_size
) != digest_size
)
2233 if (cmd
== P_CSUM_RS_REQUEST
) {
2234 D_ASSERT(mdev
->tconn
->agreed_pro_version
>= 89);
2235 peer_req
->w
.cb
= w_e_end_csum_rs_req
;
2236 /* used in the sector offset progress display */
2237 mdev
->bm_resync_fo
= BM_SECT_TO_BIT(sector
);
2238 } else if (cmd
== P_OV_REPLY
) {
2239 /* track progress, we may need to throttle */
2240 atomic_add(size
>> 9, &mdev
->rs_sect_in
);
2241 peer_req
->w
.cb
= w_e_end_ov_reply
;
2242 dec_rs_pending(mdev
);
2243 /* drbd_rs_begin_io done when we sent this request,
2244 * but accounting still needs to be done. */
2245 goto submit_for_resync
;
2250 if (mdev
->ov_start_sector
== ~(sector_t
)0 &&
2251 mdev
->tconn
->agreed_pro_version
>= 90) {
2252 unsigned long now
= jiffies
;
2254 mdev
->ov_start_sector
= sector
;
2255 mdev
->ov_position
= sector
;
2256 mdev
->ov_left
= drbd_bm_bits(mdev
) - BM_SECT_TO_BIT(sector
);
2257 mdev
->rs_total
= mdev
->ov_left
;
2258 for (i
= 0; i
< DRBD_SYNC_MARKS
; i
++) {
2259 mdev
->rs_mark_left
[i
] = mdev
->ov_left
;
2260 mdev
->rs_mark_time
[i
] = now
;
2262 dev_info(DEV
, "Online Verify start sector: %llu\n",
2263 (unsigned long long)sector
);
2265 peer_req
->w
.cb
= w_e_end_ov_req
;
2266 fault_type
= DRBD_FAULT_RS_RD
;
2270 dev_err(DEV
, "unexpected command (%s) in receive_DataRequest\n",
2272 fault_type
= DRBD_FAULT_MAX
;
	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time.  For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while anyways. */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through.  The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only. */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))
		goto out_free_e;

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);
submit:
	inc_unacked(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	list_add_tail(&peer_req->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
		return 0;

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyways */

out_free_e:
	put_ldev(mdev);
	drbd_free_ee(mdev, peer_req);
	return -EIO;
}
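
/* Editorial note on the accounting above (illustrative, not part of the
 * protocol): "size" is a byte count, and "size >> 9" converts it to
 * 512-byte sectors for rs_sect_in/rs_sect_ev; a 4096-byte request, for
 * example, accounts for 8 sectors. */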
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
{
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->tconn->net_conf->after_sb_0p) {
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
			rv = -1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv =  1;
			break;
		}
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
			rv = 1;
			break;
		}
		if (self == 1 && peer == 0) {
			rv = -1;
			break;
		}
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
		     "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if	(ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
		     /* Well, then use something else. */
			rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
	}

	return rv;
}
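
/* Illustrative scenario for the switch above (hypothetical numbers):
 * with after-sb-0pri set to discard-least-changes, ch_self = 42 changed
 * blocks on this node vs. ch_peer = 1000 on the peer yields rv = -1,
 * i.e. this node becomes SyncTarget and discards its smaller set of
 * changes. */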
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->tconn->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1  && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			  * we might be here in C_WF_REPORT_PARAMS which is transient.
			  * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
{
	int hg, rv = -100;

	switch (mdev->tconn->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			  * we might be here in C_WF_REPORT_PARAMS which is transient.
			  * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
				rv = hg;
			}
		} else
			rv = hg;
	}

	return rv;
}
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
{
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
	     text,
	     (unsigned long long)uuid[UI_CURRENT],
	     (unsigned long long)uuid[UI_BITMAP],
	     (unsigned long long)uuid[UI_HISTORY_START],
	     (unsigned long long)uuid[UI_HISTORY_END],
	     (unsigned long long)bits,
	     (unsigned long long)flags);
}
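
/* Example of the resulting log line (hypothetical values):
 *   self 0123456789ABCDEE:0000000000000000:1111111111111110:2222222222222220 bits:42 flags:2
 * i.e. current:bitmap:history-start:history-end, then the dirty-bit
 * count and the peer flags. */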
/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
    0	no Sync
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
{
	u64 self, peer;
	int i, j;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	*rule_nr = 10;
	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
		return 0;

	*rule_nr = 20;
	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)
		return -2;

	*rule_nr = 30;
	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))
		return 2;

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_set_bm(mdev, 0UL);

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
				*rule_nr = 34;
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
				*rule_nr = 36;
			}

			return 1;
		}

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
				*rule_nr = 35;
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
				*rule_nr = 37;
			}

			return -1;
		}

		/* Common power [off|failure] */
		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */
		*rule_nr = 40;

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
			return dc ? -1 : 1;
		}
	}

	*rule_nr = 50;
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer)
		return -1;

	*rule_nr = 51;
	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->tconn->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of the peer's UUIDs. */

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];

			dev_info(DEV, "Did not get the last syncUUID packet, corrected:\n");
			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

			return -1;
		}
	}

	*rule_nr = 60;
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		peer = mdev->p_uuid[i] & ~((u64)1);
		if (self == peer)
			return -2;
	}

	*rule_nr = 70;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	if (self == peer)
		return 1;

	*rule_nr = 71;
	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
	if (self == peer) {
		if (mdev->tconn->agreed_pro_version < 96 ?
		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
			/* The last P_SYNC_UUID did not get through. Undo the last start of
			   resync as sync source modifications of our UUIDs. */

			if (mdev->tconn->agreed_pro_version < 91)
				return -1091;

			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);

			return 1;
		}
	}

	*rule_nr = 80;
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		if (self == peer)
			return 2;
	}

	*rule_nr = 90;
	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
	if (self == peer && self != ((u64)0))
		return 100;

	*rule_nr = 100;
	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
		self = mdev->ldev->md.uuid[i] & ~((u64)1);
		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
			peer = mdev->p_uuid[j] & ~((u64)1);
			if (self == peer)
				return -100;
		}
	}

	return -1000;
}
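
/* Worked example for the "roles at crash time" encoding in rule 40
 * above (illustrative values): if CRASHED_PRIMARY is set locally
 * (contributing 1) and bit 1 of the peer's UI_FLAGS is set (peer was
 * primary, contributing 2), then rct = 1 + 2 = 3 and the
 * DISCARD_CONCURRENT flag breaks the tie. */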
/* drbd_sync_handshake() returns the new conn state on success, or
   CONN_MASK (-1) on failure.
 */
static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
					   enum drbd_disk_state peer_disk) __must_hold(local)
{
	int hg, rule_nr;
	enum drbd_conns rv = C_MASK;
	enum drbd_disk_state mydisk;

	mydisk = mdev->state.disk;
	if (mydisk == D_NEGOTIATING)
		mydisk = mdev->new_state_tmp.disk;

	dev_info(DEV, "drbd_sync_handshake:\n");
	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

	hg = drbd_uuid_compare(mdev, &rule_nr);

	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);

	if (hg == -1000) {
		dev_alert(DEV, "Unrelated data, aborting!\n");
		return C_MASK;
	}
	if (hg < -1000) {
		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
		return C_MASK;
	}

	if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
	    (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
		int f = (hg == -100) || abs(hg) == 2;
		hg = mydisk > D_INCONSISTENT ? 1 : -1;
		if (f)
			hg = hg*2;
		dev_info(DEV, "Becoming sync %s due to disk states.\n",
		     hg > 0 ? "source" : "target");
	}

	if (abs(hg) == 100)
		drbd_khelper(mdev, "initial-split-brain");

	if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) {
		int pcount = (mdev->state.role == R_PRIMARY)
			   + (peer_role == R_PRIMARY);
		int forced = (hg == -100);

		switch (pcount) {
		case 0:
			hg = drbd_asb_recover_0p(mdev);
			break;
		case 1:
			hg = drbd_asb_recover_1p(mdev);
			break;
		case 2:
			hg = drbd_asb_recover_2p(mdev);
			break;
		}
		if (abs(hg) < 100) {
			dev_warn(DEV, "Split-Brain detected, %d primaries, "
			     "automatically solved. Sync from %s node\n",
			     pcount, (hg < 0) ? "peer" : "this");
			if (forced) {
				dev_warn(DEV, "Doing a full sync, since"
				     " UUIDs were ambiguous.\n");
				hg = hg*2;
			}
		}
	}

	if (hg == -100) {
		if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
			hg = -1;
		if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
			hg = 1;

		if (abs(hg) < 100)
			dev_warn(DEV, "Split-Brain detected, manually solved. "
			     "Sync from %s node\n",
			     (hg < 0) ? "peer" : "this");
	}

	if (hg == -100) {
		/* FIXME this log message is not correct if we end up here
		 * after an attempted attach on a diskless node.
		 * We just refuse to attach -- well, we drop the "connection"
		 * to that disk, in a way... */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (mdev->tconn->net_conf->rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		default:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
			     "assumption\n");
		}
	}

	if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
			     drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}
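
/* Reading hg at this point (a summary matching the table before
 * drbd_uuid_compare): the sign selects the direction (positive: this
 * node becomes SyncSource, negative: SyncTarget), while abs(hg) >= 2
 * forces a full sync by first setting the whole bitmap. */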
/* returns 1 if invalid */
static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
	    (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
		return 0;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
	    self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
		return 1;

	/* everything else is valid if they are equal on both sides. */
	if (peer == self)
		return 0;

	/* everything else is invalid. */
	return 1;
}
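
/* Illustrative pairings for cmp_after_sb(): local "discard-local" with
 * peer "discard-remote" is the one valid asymmetric combination (both
 * sides agree on whose data to discard); "discard-local" configured on
 * both sides would discard both replicas and is therefore rejected. */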
static int receive_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd,
			    unsigned int data_size)
{
	struct p_protocol *p = tconn->data.rbuf;
	int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
	int p_want_lose, p_two_primaries, cf;
	char p_integrity_alg[SHARED_SECRET_MAX] = "";

	p_proto		= be32_to_cpu(p->protocol);
	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
	p_two_primaries = be32_to_cpu(p->two_primaries);
	cf		= be32_to_cpu(p->conn_flags);
	p_want_lose = cf & CF_WANT_LOSE;

	clear_bit(CONN_DRY_RUN, &tconn->flags);

	if (cf & CF_DRY_RUN)
		set_bit(CONN_DRY_RUN, &tconn->flags);

	if (p_proto != tconn->net_conf->wire_protocol) {
		conn_err(tconn, "incompatible communication protocols\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_0p, tconn->net_conf->after_sb_0p)) {
		conn_err(tconn, "incompatible after-sb-0pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_1p, tconn->net_conf->after_sb_1p)) {
		conn_err(tconn, "incompatible after-sb-1pri settings\n");
		goto disconnect;
	}

	if (cmp_after_sb(p_after_sb_2p, tconn->net_conf->after_sb_2p)) {
		conn_err(tconn, "incompatible after-sb-2pri settings\n");
		goto disconnect;
	}

	if (p_want_lose && tconn->net_conf->want_lose) {
		conn_err(tconn, "both sides have the 'want_lose' flag set\n");
		goto disconnect;
	}

	if (p_two_primaries != tconn->net_conf->two_primaries) {
		conn_err(tconn, "incompatible setting of the two-primaries options\n");
		goto disconnect;
	}

	if (tconn->agreed_pro_version >= 87) {
		unsigned char *my_alg = tconn->net_conf->integrity_alg;
		int err;

		err = drbd_recv_all(tconn, p_integrity_alg, data_size);
		if (err)
			return err;

		p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
		if (strcmp(p_integrity_alg, my_alg)) {
			conn_err(tconn, "incompatible setting of the data-integrity-alg\n");
			goto disconnect;
		}
		conn_info(tconn, "data-integrity-alg: %s\n",
		     my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
	}

	return 0;

disconnect:
	conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	return -EIO;
}
/* helper function
 * input: alg name, feature name
 * return: NULL (alg name was "")
 *         ERR_PTR(error) if something goes wrong
 *         or the crypto hash ptr, if it worked out ok. */
struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
		const char *alg, const char *name)
{
	struct crypto_hash *tfm;

	if (!alg[0])
		return NULL;

	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
			alg, name, PTR_ERR(tfm));
		return tfm;
	}
	if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
		crypto_free_hash(tfm);
		dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}
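
/* Typical use, as seen below in receive_SyncParam: pass the algorithm
 * name received from the peer, check the result with IS_ERR() before
 * using it, and fall back to disconnecting on error.  A NULL return
 * means the peer sent an empty algorithm name, i.e. the feature is
 * simply unused. */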
static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int packet_size)
{
	struct p_rs_param_95 *p = mdev->tconn->data.rbuf;
	unsigned int header_size, data_size, exp_max_sz;
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	const int apv = mdev->tconn->agreed_pro_version;
	int *rs_plan_s = NULL;
	int fifo_size = 0;
	int err;

	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
		    : apv == 88 ? sizeof(struct p_rs_param)
					+ SHARED_SECRET_MAX
		    : apv <= 94 ? sizeof(struct p_rs_param_89)
		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);

	if (packet_size > exp_max_sz) {
		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
		    packet_size, exp_max_sz);
		return -EIO;
	}

	if (apv <= 88) {
		header_size = sizeof(struct p_rs_param) - sizeof(struct p_header);
		data_size = packet_size - header_size;
	} else if (apv <= 94) {
		header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header);
		data_size = packet_size - header_size;
		D_ASSERT(data_size == 0);
	} else {
		header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header);
		data_size = packet_size - header_size;
		D_ASSERT(data_size == 0);
	}

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	err = drbd_recv_all(mdev->tconn, &p->head.payload, header_size);
	if (err)
		return err;

	if (get_ldev(mdev)) {
		mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
		put_ldev(mdev);
	}

	if (apv >= 88) {
		if (apv == 88) {
			if (data_size > SHARED_SECRET_MAX) {
				dev_err(DEV, "verify-alg too long, "
				    "peer wants %u, accepting only %u byte\n",
						data_size, SHARED_SECRET_MAX);
				return -EIO;
			}

			err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
			if (err)
				return err;

			/* we expect NUL terminated string */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[data_size-1] == 0);
			p->verify_alg[data_size-1] = 0;

		} else /* apv >= 89 */ {
			/* we still expect NUL terminated strings */
			/* but just in case someone tries to be evil */
			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
		}

		if (strcmp(mdev->tconn->net_conf->verify_alg, p->verify_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->tconn->net_conf->verify_alg, p->verify_alg);
				goto disconnect;
			}
			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->verify_alg, "verify-alg");
			if (IS_ERR(verify_tfm)) {
				verify_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv >= 89 && strcmp(mdev->tconn->net_conf->csums_alg, p->csums_alg)) {
			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
				    mdev->tconn->net_conf->csums_alg, p->csums_alg);
				goto disconnect;
			}
			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
					p->csums_alg, "csums-alg");
			if (IS_ERR(csums_tfm)) {
				csums_tfm = NULL;
				goto disconnect;
			}
		}

		if (apv > 94 && get_ldev(mdev)) {
			mdev->ldev->dc.resync_rate = be32_to_cpu(p->rate);
			mdev->ldev->dc.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
			mdev->ldev->dc.c_delay_target = be32_to_cpu(p->c_delay_target);
			mdev->ldev->dc.c_fill_target = be32_to_cpu(p->c_fill_target);
			mdev->ldev->dc.c_max_rate = be32_to_cpu(p->c_max_rate);

			fifo_size = (mdev->ldev->dc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
			if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
				rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
				if (!rs_plan_s) {
					dev_err(DEV, "kmalloc of fifo_buffer failed");
					put_ldev(mdev);
					goto disconnect;
				}
			}
			put_ldev(mdev);
		}

		spin_lock(&mdev->peer_seq_lock);
		/* lock against drbd_nl_syncer_conf() */
		if (verify_tfm) {
			strcpy(mdev->tconn->net_conf->verify_alg, p->verify_alg);
			mdev->tconn->net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
			crypto_free_hash(mdev->tconn->verify_tfm);
			mdev->tconn->verify_tfm = verify_tfm;
			dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
		}
		if (csums_tfm) {
			strcpy(mdev->tconn->net_conf->csums_alg, p->csums_alg);
			mdev->tconn->net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
			crypto_free_hash(mdev->tconn->csums_tfm);
			mdev->tconn->csums_tfm = csums_tfm;
			dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
		}
		if (fifo_size != mdev->rs_plan_s.size) {
			kfree(mdev->rs_plan_s.values);
			mdev->rs_plan_s.values = rs_plan_s;
			mdev->rs_plan_s.size   = fifo_size;
			mdev->rs_planed = 0;
		}
		spin_unlock(&mdev->peer_seq_lock);
	}
	return 0;

disconnect:
	/* just for completeness: actually not needed,
	 * as this is not reached if csums_tfm was ok. */
	crypto_free_hash(csums_tfm);
	/* but free the verify_tfm again, if csums_tfm did not work out */
	crypto_free_hash(verify_tfm);
	conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	return -EIO;
}
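
/* Worked example for the fifo_size computation above, assuming
 * SLEEP_TIME is HZ/10 as defined elsewhere in DRBD (illustrative
 * numbers): with c_plan_ahead = 20 (in tenths of a second),
 * fifo_size = (20 * 10 * HZ/10) / HZ = 20 slots, i.e. one plan slot
 * per SLEEP_TIME tick of the resync planning window. */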
/* warn if the arguments differ by more than 12.5% */
static void warn_if_differ_considerably(struct drbd_conf *mdev,
	const char *s, sector_t a, sector_t b)
{
	sector_t d;
	if (a == 0 || b == 0)
		return;
	d = (a > b) ? (a - b) : (b - a);
	if (d > (a>>3) || d > (b>>3))
		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
		     (unsigned long long)a, (unsigned long long)b);
}
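
/* The 12.5% threshold comes from the shift: d > (a>>3) means the
 * difference exceeds one eighth of a.  Example with made-up sizes:
 * a = 800 sectors, b = 690, so d = 110 > 100 = a>>3, and we warn. */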
static int receive_sizes(struct drbd_conf *mdev, enum drbd_packet cmd,
			 unsigned int data_size)
{
	struct p_sizes *p = mdev->tconn->data.rbuf;
	enum determine_dev_size dd = unchanged;
	sector_t p_size, p_usize, my_usize;
	int ldsc = 0; /* local disk size changed */
	enum dds_flags ddsf;

	p_size = be64_to_cpu(p->d_size);
	p_usize = be64_to_cpu(p->u_size);

	/* just store the peer's disk size for now.
	 * we still need to figure out whether we accept that. */
	mdev->p_size = p_size;

	if (get_ldev(mdev)) {
		warn_if_differ_considerably(mdev, "lower level device sizes",
			   p_size, drbd_get_max_capacity(mdev->ldev));
		warn_if_differ_considerably(mdev, "user requested size",
					    p_usize, mdev->ldev->dc.disk_size);

		/* if this is the first connect, or an otherwise expected
		 * param exchange, choose the minimum */
		if (mdev->state.conn == C_WF_REPORT_PARAMS)
			p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
					       p_usize);

		my_usize = mdev->ldev->dc.disk_size;

		if (mdev->ldev->dc.disk_size != p_usize) {
			mdev->ldev->dc.disk_size = p_usize;
			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
			     (unsigned long)mdev->ldev->dc.disk_size);
		}

		/* Never shrink a device with usable data during connect.
		   But allow online shrinking if we are connected. */
		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
		    drbd_get_capacity(mdev->this_bdev) &&
		    mdev->state.disk >= D_OUTDATED &&
		    mdev->state.conn < C_CONNECTED) {
			dev_err(DEV, "The peer's disk size is too small!\n");
			conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
			mdev->ldev->dc.disk_size = my_usize;
			put_ldev(mdev);
			return -EIO;
		}
		put_ldev(mdev);
	}

	ddsf = be16_to_cpu(p->dds_flags);
	if (get_ldev(mdev)) {
		dd = drbd_determine_dev_size(mdev, ddsf);
		put_ldev(mdev);
		if (dd == dev_size_error)
			return -EIO;
		drbd_md_sync(mdev);
	} else {
		/* I am diskless, need to accept the peer's size. */
		drbd_set_my_capacity(mdev, p_size);
	}

	mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
	drbd_reconsider_max_bio_size(mdev);

	if (get_ldev(mdev)) {
		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
			ldsc = 1;
		}

		put_ldev(mdev);
	}

	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
		if (be64_to_cpu(p->c_size) !=
		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
			/* we have different sizes, probably peer
			 * needs to know my new size... */
			drbd_send_sizes(mdev, 0, ddsf);
		}
		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
		    (dd == grew && mdev->state.conn == C_CONNECTED)) {
			if (mdev->state.pdsk >= D_INCONSISTENT &&
			    mdev->state.disk >= D_INCONSISTENT) {
				if (ddsf & DDSF_NO_RESYNC)
					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
				else
					resync_after_online_grow(mdev);
			} else
				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
		}
	}

	return 0;
}
static int receive_uuids(struct drbd_conf *mdev, enum drbd_packet cmd,
			 unsigned int data_size)
{
	struct p_uuids *p = mdev->tconn->data.rbuf;
	u64 *p_uuid;
	int i, updated_uuids = 0;

	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);

	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
		p_uuid[i] = be64_to_cpu(p->uuid[i]);

	kfree(mdev->p_uuid);
	mdev->p_uuid = p_uuid;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.disk < D_INCONSISTENT &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		return -EIO;
	}

	if (get_ldev(mdev)) {
		int skip_initial_sync =
			mdev->state.conn == C_CONNECTED &&
			mdev->tconn->agreed_pro_version >= 90 &&
			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
			(p_uuid[UI_FLAGS] & 8);
		if (skip_initial_sync) {
			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
					"clear_n_write from receive_uuids",
					BM_LOCKED_TEST_ALLOWED);
			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			drbd_md_sync(mdev);
			updated_uuids = 1;
		}
		put_ldev(mdev);
	} else if (mdev->state.disk < D_INCONSISTENT &&
		   mdev->state.role == R_PRIMARY) {
		/* I am a diskless primary, the peer just created a new current UUID
		   for me. */
		updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
	}

	/* Before we test for the disk state, we should wait until an eventually
	   ongoing cluster wide state change is finished. That is important if
	   we are primary and are detaching from our disk. We need to see the
	   new disk state... */
	mutex_lock(mdev->state_mutex);
	mutex_unlock(mdev->state_mutex);
	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);

	if (updated_uuids)
		drbd_print_uuids(mdev, "receiver updated UUIDs to");

	return 0;
}
/**
 * convert_state() - Converts the peer's view of the cluster state to our point of view
 * @ps:		The state as seen by the peer.
 */
static union drbd_state convert_state(union drbd_state ps)
{
	union drbd_state ms;

	static enum drbd_conns c_tab[] = {
		[C_CONNECTED] = C_CONNECTED,

		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
		[C_VERIFY_S]       = C_VERIFY_T,
		[C_MASK]   = C_MASK,
	};

	ms.i = ps.i;

	ms.conn = c_tab[ps.conn];
	ms.peer = ps.role;
	ms.role = ps.peer;
	ms.pdsk = ps.disk;
	ms.disk = ps.pdsk;
	ms.peer_isp = (ps.aftr_isp | ps.user_isp);

	return ms;
}
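
/* Example of the mirroring done in convert_state(), with hypothetical
 * input: if the peer reports role=Primary, peer=Secondary,
 * disk=UpToDate, pdsk=Inconsistent, our view becomes role=Secondary,
 * peer=Primary, disk=Inconsistent, pdsk=UpToDate; likewise
 * C_STARTING_SYNC_S on the peer is C_STARTING_SYNC_T here. */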
static int receive_req_state(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int data_size)
{
	struct p_req_state *p = mdev->tconn->data.rbuf;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
	    mutex_is_locked(mdev->state_mutex)) {
		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
		return 0;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
	drbd_send_sr_reply(mdev, rv);

	drbd_md_sync(mdev);

	return 0;
}
static int receive_req_conn_state(struct drbd_tconn *tconn, enum drbd_packet cmd,
				  unsigned int data_size)
{
	struct p_req_state *p = tconn->data.rbuf;
	union drbd_state mask, val;
	enum drbd_state_rv rv;

	mask.i = be32_to_cpu(p->mask);
	val.i = be32_to_cpu(p->val);

	if (test_bit(DISCARD_CONCURRENT, &tconn->flags) &&
	    mutex_is_locked(&tconn->cstate_mutex)) {
		conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
		return 0;
	}

	mask = convert_state(mask);
	val = convert_state(val);

	rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY);
	conn_send_sr_reply(tconn, rv);

	return 0;
}
static int receive_state(struct drbd_conf *mdev, enum drbd_packet cmd,
			 unsigned int data_size)
{
	struct p_state *p = mdev->tconn->data.rbuf;
	union drbd_state os, ns, peer_state;
	enum drbd_disk_state real_peer_disk;
	enum chg_state_flags cs_flags;
	int rv;

	peer_state.i = be32_to_cpu(p->state);

	real_peer_disk = peer_state.disk;
	if (peer_state.disk == D_NEGOTIATING) {
		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
	}

	spin_lock_irq(&mdev->tconn->req_lock);
 retry:
	os = ns = mdev->state;
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* peer says his disk is uptodate, while we think it is inconsistent,
	 * and this happens while we think we have a sync going on. */
	if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE &&
	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
		/* If we are (becoming) SyncSource, but peer is still in sync
		 * preparation, ignore its uptodate-ness to avoid flapping, it
		 * will change to inconsistent once the peer reaches active
		 * syncing states.
		 * It may have changed syncer-paused flags, however, so we
		 * cannot ignore this completely. */
		if (peer_state.conn > C_CONNECTED &&
		    peer_state.conn < C_SYNC_SOURCE)
			real_peer_disk = D_INCONSISTENT;

		/* if peer_state changes to connected at the same time,
		 * it explicitly notifies us that it finished resync.
		 * Maybe we should finish it up, too? */
		else if (os.conn >= C_SYNC_SOURCE &&
			 peer_state.conn == C_CONNECTED) {
			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
				drbd_resync_finished(mdev);
			return 0;
		}
	}

	/* peer says his disk is inconsistent, while we think it is uptodate,
	 * and this happens while the peer still thinks we have a sync going on,
	 * but we think we are already done with the sync.
	 * We ignore this to avoid flapping pdsk.
	 * This should not happen if the peer is a recent version of drbd. */
	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
		real_peer_disk = D_UP_TO_DATE;

	if (ns.conn == C_WF_REPORT_PARAMS)
		ns.conn = C_CONNECTED;

	if (peer_state.conn == C_AHEAD)
		ns.conn = C_BEHIND;

	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		int cr; /* consider resync */

		/* if we established a new connection */
		cr  = (os.conn < C_CONNECTED);
		/* if we had an established connection
		 * and one of the nodes newly attaches a disk */
		cr |= (os.conn == C_CONNECTED &&
		       (peer_state.disk == D_NEGOTIATING ||
			os.disk == D_NEGOTIATING));
		/* if we have both been inconsistent, and the peer has been
		 * forced to be UpToDate with --overwrite-data */
		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
		/* if we had been plain connected, and the admin requested to
		 * start a sync by "invalidate" or "invalidate-remote" */
		cr |= (os.conn == C_CONNECTED &&
				(peer_state.conn >= C_STARTING_SYNC_S &&
				 peer_state.conn <= C_WF_BITMAP_T));

		if (cr)
			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);

		put_ldev(mdev);
		if (ns.conn == C_MASK) {
			ns.conn = C_CONNECTED;
			if (mdev->state.disk == D_NEGOTIATING) {
				drbd_force_state(mdev, NS(disk, D_FAILED));
			} else if (peer_state.disk == D_NEGOTIATING) {
				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
				peer_state.disk = D_DISKLESS;
				real_peer_disk = D_DISKLESS;
			} else {
				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
					return -EIO;
				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
				conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
				return -EIO;
			}
		}
	}

	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.i != os.i)
		goto retry;
	clear_bit(CONSIDER_RESYNC, &mdev->flags);
	ns.peer = peer_state.role;
	ns.pdsk = real_peer_disk;
	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
		ns.disk = mdev->new_state_tmp.disk;
	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
		   for temporal network outages! */
		spin_unlock_irq(&mdev->tconn->req_lock);
		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
		tl_clear(mdev->tconn);
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
		conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
		return -EIO;
	}
	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS) {
		conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		return -EIO;
	}

	if (os.conn > C_WF_REPORT_PARAMS) {
		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
		    peer_state.disk != D_NEGOTIATING) {
			/* we want resync, peer has not yet decided to sync... */
			/* Nowadays only used when forcing a node into primary role and
			   setting its disk to UpToDate with that */
			drbd_send_uuids(mdev);
			drbd_send_state(mdev);
		}
	}

	mdev->tconn->net_conf->want_lose = 0;

	drbd_md_sync(mdev); /* update connected indicator, la_size, ... */

	return 0;
}
static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packet cmd,
			     unsigned int data_size)
{
	struct p_rs_uuid *p = mdev->tconn->data.rbuf;

	wait_event(mdev->misc_wait,
		   mdev->state.conn == C_WF_SYNC_UUID ||
		   mdev->state.conn == C_BEHIND ||
		   mdev->state.conn < C_CONNECTED ||
		   mdev->state.disk < D_NEGOTIATING);

	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */

	/* Here the _drbd_uuid_ functions are right, current should
	   _not_ be rotated into the history */
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);

		drbd_print_uuids(mdev, "updated sync uuid");
		drbd_start_resync(mdev, C_SYNC_TARGET);

		put_ldev(mdev);
	} else
		dev_err(DEV, "Ignoring SyncUUID packet!\n");

	return 0;
}
/**
 * receive_bitmap_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
		     struct p_header *h, struct bm_xfer_ctx *c)
{
	unsigned long *buffer = (unsigned long *)h->payload;
	unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
	unsigned want = num_words * sizeof(long);
	int err;

	if (want != data_size) {
		dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
		return -EIO;
	}
	if (want == 0)
		return 0;
	err = drbd_recv_all(mdev->tconn, buffer, want);
	if (err)
		return err;

	drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);

	c->word_offset += num_words;
	c->bit_offset = c->word_offset * BITS_PER_LONG;
	if (c->bit_offset > c->bm_bits)
		c->bit_offset = c->bm_bits;

	return 1;
}
static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static int dcbp_get_start(struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}

static int dcbp_get_pad_bits(struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x7;
}
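
/* Layout of the "encoding" byte, as implied by the masks above: bit 7
 * is the start-toggle flag, bits 6..4 hold the pad-bit count, and bits
 * 3..0 carry the drbd_bitmap_code.  For example, 0x91 means start flag
 * set, 1 pad bit, code 0x1 (RLE_VLI_Bits). */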
/**
 * recv_bm_rle_bits
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
recv_bm_rle_bits(struct drbd_conf *mdev,
		 struct p_compressed_bm *p,
		 struct bm_xfer_ctx *c,
		 unsigned int len)
{
	struct bitstream bs;
	u64 look_ahead;
	u64 rl;
	u64 tmp;
	unsigned long s = c->bit_offset;
	unsigned long e;
	int toggle = dcbp_get_start(p);
	int have;
	int bits;

	bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));

	bits = bitstream_get_bits(&bs, &look_ahead, 64);
	if (bits < 0)
		return -EIO;

	for (have = bits; have > 0; s += rl, toggle = !toggle) {
		bits = vli_decode_bits(&rl, look_ahead);
		if (bits <= 0)
			return -EIO;

		if (toggle) {
			e = s + rl - 1;
			if (e >= c->bm_bits) {
				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
				return -EIO;
			}
			_drbd_bm_set_bits(mdev, s, e);
		}

		if (have < bits) {
			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
				have, bits, look_ahead,
				(unsigned int)(bs.cur.b - p->code),
				(unsigned int)bs.buf_len);
			return -EIO;
		}
		look_ahead >>= bits;
		have -= bits;

		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
		if (bits < 0)
			return -EIO;
		look_ahead |= tmp << have;
		have += bits;
	}

	c->bit_offset = s;
	bm_xfer_ctx_bit_to_word_offset(c);

	return (s != c->bm_bits);
}
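
/* Sketch of the RLE scheme decoded above (illustrative run lengths,
 * not from the original source): the stream alternates run lengths of
 * clear and set bits, starting with whichever the start flag (toggle)
 * selects.  With toggle = 0 and runs 5, 3, 7, bits 5..7 are set while
 * bits 0..4 and 8..14 stay clear, matching the _drbd_bm_set_bits()
 * call on every "toggle" run. */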
/**
 * decode_bitmap_c
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
decode_bitmap_c(struct drbd_conf *mdev,
		struct p_compressed_bm *p,
		struct bm_xfer_ctx *c,
		unsigned int len)
{
	if (dcbp_get_code(p) == RLE_VLI_Bits)
		return recv_bm_rle_bits(mdev, p, c, len);

	/* other variants had been implemented for evaluation,
	 * but have been dropped as this one turned out to be "best"
	 * during all our tests. */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
	return -EIO;
}
void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total can not be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
		                    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}
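
/* Worked example of the compression report (illustrative numbers):
 * plain = 100000 bytes, total = 25000; r = 1000*25000/100000 = 250,
 * then r = 1000 - 250 = 750, printed as "compression: 75.0%". */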
/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter if we process it in 32 bit chunks or 64 bit
   chunks as long as it is little endian. (Understand it as byte stream,
   beginning with the lowest byte...) If we would use big endian
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bits issue.

   returns 0 on success, or a negative error code otherwise. */
static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packet cmd,
			  unsigned int data_size)
{
	struct bm_xfer_ctx c;
	int err;
	struct p_header *h = mdev->tconn->data.rbuf;
	struct packet_info pi;

	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
	/* you are supposed to send additional out-of-sync information
	 * if you actually set bits during this phase */

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	for(;;) {
		if (cmd == P_BITMAP) {
			err = receive_bitmap_plain(mdev, data_size, h, &c);
		} else if (cmd == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (data_size > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				err = -EIO;
				goto out;
			}

			p = mdev->tconn->data.rbuf;
			err = drbd_recv_all(mdev->tconn, p->head.payload, data_size);
			if (err)
				goto out;
			if (data_size <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
				err = -EIO;
				goto out;
			}
			err = decode_bitmap_c(mdev, p, &c, data_size);
		} else {
			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
			err = -EIO;
			goto out;
		}

		c.packets[cmd == P_BITMAP]++;
		c.bytes[cmd == P_BITMAP] += sizeof(struct p_header) + data_size;

		if (err <= 0) {
			if (err < 0)
				goto out;
			break;
		}
		err = drbd_recv_header(mdev->tconn, &pi);
		if (err)
			goto out;
		cmd = pi.cmd;
		data_size = pi.size;
	}

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		enum drbd_state_rv rv;

		err = drbd_send_bitmap(mdev);
		if (err)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		D_ASSERT(rv == SS_SUCCESS);
	} else if (mdev->state.conn != C_WF_BITMAP_S) {
		/* admin may have requested C_DISCONNECTING,
		 * other threads may have noticed network errors */
		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
		    drbd_conn_str(mdev->state.conn));
	}
	err = 0;

 out:
	drbd_bm_unlock(mdev);
	if (!err && mdev->state.conn == C_WF_BITMAP_S)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	return err;
}
static int _tconn_receive_skip(struct drbd_tconn *tconn, unsigned int data_size)
{
	/* TODO zero copy sink :) */
	static char sink[128];
	int size, want, r;

	size = data_size;
	while (size > 0) {
		want = min_t(int, size, sizeof(sink));
		r = drbd_recv(tconn, sink, want);
		if (r <= 0)
			break;
		size -= r;
	}
	return size ? -EIO : 0;
}

static int receive_skip(struct drbd_conf *mdev, enum drbd_packet cmd,
			unsigned int data_size)
{
	dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
		 cmd, data_size);

	return _tconn_receive_skip(mdev->tconn, data_size);
}

static int tconn_receive_skip(struct drbd_tconn *tconn, enum drbd_packet cmd, unsigned int data_size)
{
	conn_warn(tconn, "skipping packet for non existing volume type %d, l: %d!\n",
		  cmd, data_size);

	return _tconn_receive_skip(tconn, data_size);
}
static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packet cmd,
				unsigned int data_size)
{
	/* Make sure we've acked all the TCP data associated
	 * with the data requests being unplugged */
	drbd_tcp_quickack(mdev->tconn->data.socket);

	return 0;
}
static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packet cmd,
			       unsigned int data_size)
{
	struct p_block_desc *p = mdev->tconn->data.rbuf;

	switch (mdev->state.conn) {
	case C_WF_SYNC_UUID:
	case C_WF_BITMAP_T:
	case C_BEHIND:
			break;
	default:
		dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
				drbd_conn_str(mdev->state.conn));
	}

	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));

	return 0;
}
struct data_cmd {
	int expect_payload;
	size_t pkt_size;
	enum mdev_or_conn fa_type; /* first argument's type */
	union {
		int (*mdev_fn)(struct drbd_conf *, enum drbd_packet cmd,
				  unsigned int to_receive);
		int (*conn_fn)(struct drbd_tconn *, enum drbd_packet cmd,
				  unsigned int to_receive);
	};
};

static struct data_cmd drbd_cmd_handler[] = {
	[P_DATA]	    = { 1, sizeof(struct p_data), MDEV, { receive_Data } },
	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), MDEV, { receive_DataReply } },
	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), MDEV, { receive_RSDataReply } } ,
	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), MDEV, { receive_Barrier } } ,
	[P_BITMAP]	    = { 1, sizeof(struct p_header), MDEV, { receive_bitmap } } ,
	[P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header), MDEV, { receive_bitmap } } ,
	[P_UNPLUG_REMOTE]   = { 0, sizeof(struct p_header), MDEV, { receive_UnplugRemote } },
	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
	[P_SYNC_PARAM]	    = { 1, sizeof(struct p_header), MDEV, { receive_SyncParam } },
	[P_SYNC_PARAM89]    = { 1, sizeof(struct p_header), MDEV, { receive_SyncParam } },
	[P_PROTOCOL]	    = { 1, sizeof(struct p_protocol), CONN, { .conn_fn = receive_protocol } },
	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), MDEV, { receive_uuids } },
	[P_SIZES]	    = { 0, sizeof(struct p_sizes), MDEV, { receive_sizes } },
	[P_STATE]	    = { 0, sizeof(struct p_state), MDEV, { receive_state } },
	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), MDEV, { receive_req_state } },
	[P_SYNC_UUID]	    = { 0, sizeof(struct p_rs_uuid), MDEV, { receive_sync_uuid } },
	[P_OV_REQUEST]	    = { 0, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
	[P_OV_REPLY]	    = { 1, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), MDEV, { receive_DataRequest } },
	[P_DELAY_PROBE]	    = { 0, sizeof(struct p_delay_probe93), MDEV, { receive_skip } },
	[P_OUT_OF_SYNC]	    = { 0, sizeof(struct p_block_desc), MDEV, { receive_out_of_sync } },
	[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), CONN, { .conn_fn = receive_req_conn_state } },
};
static void drbdd(struct drbd_tconn *tconn)
{
	struct p_header *header = tconn->data.rbuf;
	struct packet_info pi;
	size_t shs; /* sub header size */
	int err;

	while (get_t_state(&tconn->receiver) == RUNNING) {
		drbd_thread_current_set_cpu(&tconn->receiver);
		if (drbd_recv_header(tconn, &pi))
			goto err_out;

		if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) ||
		    !drbd_cmd_handler[pi.cmd].mdev_fn)) {
			conn_err(tconn, "unknown packet type %d, l: %d!\n", pi.cmd, pi.size);
			goto err_out;
		}

		shs = drbd_cmd_handler[pi.cmd].pkt_size - sizeof(struct p_header);
		if (pi.size - shs > 0 && !drbd_cmd_handler[pi.cmd].expect_payload) {
			conn_err(tconn, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size);
			goto err_out;
		}

		if (shs) {
			err = drbd_recv_all_warn(tconn, &header->payload, shs);
			if (err)
				goto err_out;
		}

		if (drbd_cmd_handler[pi.cmd].fa_type == CONN) {
			err = drbd_cmd_handler[pi.cmd].conn_fn(tconn, pi.cmd, pi.size - shs);
		} else {
			struct drbd_conf *mdev = vnr_to_mdev(tconn, pi.vnr);
			err = mdev ?
				drbd_cmd_handler[pi.cmd].mdev_fn(mdev, pi.cmd, pi.size - shs) :
				tconn_receive_skip(tconn, pi.cmd, pi.size - shs);
		}

		if (unlikely(err)) {
			conn_err(tconn, "error receiving %s, l: %d!\n",
			    cmdname(pi.cmd), pi.size);
			goto err_out;
		}
	}
	return;

    err_out:
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
void conn_flush_workqueue(struct drbd_tconn *tconn)
{
	struct drbd_wq_barrier barr;

	barr.w.cb = w_prev_work_done;
	barr.w.tconn = tconn;
	init_completion(&barr.done);
	drbd_queue_work(&tconn->data.work, &barr.w);
	wait_for_completion(&barr.done);
}
static void drbd_disconnect(struct drbd_tconn *tconn)
{
	enum drbd_conns oc;
	int rv = SS_UNKNOWN_ERROR;

	if (tconn->cstate == C_STANDALONE)
		return;

	/* asender does not clean up anything. it must not interfere, either */
	drbd_thread_stop(&tconn->asender);
	drbd_free_sock(tconn);

	idr_for_each(&tconn->volumes, drbd_disconnected, tconn);
	conn_info(tconn, "Connection closed\n");

	if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
		conn_try_outdate_peer_async(tconn);

	spin_lock_irq(&tconn->req_lock);
	oc = tconn->cstate;
	if (oc >= C_UNCONNECTED)
		rv = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	spin_unlock_irq(&tconn->req_lock);

	if (oc == C_DISCONNECTING) {
		wait_event(tconn->net_cnt_wait, atomic_read(&tconn->net_cnt) == 0);

		crypto_free_hash(tconn->cram_hmac_tfm);
		tconn->cram_hmac_tfm = NULL;

		kfree(tconn->net_conf);
		tconn->net_conf = NULL;
		conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE);
	}
}
static int drbd_disconnected(int vnr, void *p, void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)p;
	enum drbd_fencing_p fp;
	unsigned int i;

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	del_timer(&mdev->request_timer);

	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	/* This also does reclaim_net_ee().  If we do this too early, we might
	 * miss some resync ee and pages.*/
	drbd_process_done_ee(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!is_susp(mdev->state))
		tl_clear(mdev->tconn);

	drbd_md_sync(mdev);

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_release_ee(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
	atomic_set(&mdev->current_epoch->epoch_size, 0);
	D_ASSERT(list_empty(&mdev->current_epoch->list));

	return 0;
}
/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_handshake(struct drbd_tconn *tconn)
{
	/* ASSERT current == mdev->tconn->receiver ... */
	struct p_handshake *p = &tconn->data.sbuf.handshake;
	int err;

	if (mutex_lock_interruptible(&tconn->data.mutex)) {
		conn_err(tconn, "interrupted during initial handshake\n");
		return -EINTR;
	}

	if (tconn->data.socket == NULL) {
		mutex_unlock(&tconn->data.mutex);
		return -EIO;
	}

	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	err = _conn_send_cmd(tconn, 0, tconn->data.socket, P_HAND_SHAKE,
			     &p->head, sizeof(*p), 0);
	mutex_unlock(&tconn->data.mutex);
	return err;
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_handshake(struct drbd_tconn *tconn)
{
	/* ASSERT current == tconn->receiver ... */
	struct p_handshake *p = tconn->data.rbuf;
	const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
	struct packet_info pi;
	int err;

	err = drbd_send_handshake(tconn);
	if (err)
		return 0;

	err = drbd_recv_header(tconn, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_HAND_SHAKE) {
		conn_err(tconn, "expected HandShake packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		conn_err(tconn, "expected HandShake length: %u, received: %u\n",
			 expect, pi.size);
		return -1;
	}

	err = drbd_recv_all_warn(tconn, &p->head.payload, expect);
	if (err)
		return 0;

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	conn_info(tconn, "Handshake successful: "
	     "Agreed network protocol version %d\n", tconn->agreed_pro_version);

	return 1;

 incompat:
	conn_err(tconn, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
		PRO_VERSION_MIN, PRO_VERSION_MAX,
		p->protocol_min, p->protocol_max);
	return -1;
}
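
/* Version negotiation example (hypothetical version numbers): if we
 * support protocols 86-100 and the peer reports 86-96,
 * agreed_pro_version becomes min(100, 96) = 96; if the ranges do not
 * overlap at all, we log the "incompatible DRBD dialects" error and go
 * standalone. */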
4226 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4227 static int drbd_do_auth(struct drbd_tconn
*tconn
)
4229 dev_err(DEV
, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4230 dev_err(DEV
, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4234 #define CHALLENGE_LEN 64
4238 0 - failed, try again (network error),
4239 -1 - auth failed, don't try again.
static int drbd_do_auth(struct drbd_tconn *tconn)
{
        char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
        struct scatterlist sg;
        char *response = NULL;
        char *right_response = NULL;
        char *peers_ch = NULL;
        unsigned int key_len = strlen(tconn->net_conf->shared_secret);
        unsigned int resp_size;
        struct hash_desc desc;
        struct packet_info pi;
        int err, rv;

        desc.tfm = tconn->cram_hmac_tfm;
        desc.flags = 0;

        rv = crypto_hash_setkey(tconn->cram_hmac_tfm,
                                (u8 *)tconn->net_conf->shared_secret, key_len);
        if (rv) {
                conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
                rv = -1;
                goto fail;
        }

        get_random_bytes(my_challenge, CHALLENGE_LEN);

        rv = !conn_send_cmd2(tconn, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
        if (!rv)
                goto fail;

        err = drbd_recv_header(tconn, &pi);
        if (err) {
                rv = 0;
                goto fail;
        }

        if (pi.cmd != P_AUTH_CHALLENGE) {
                conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
                         cmdname(pi.cmd), pi.cmd);
                rv = 0;
                goto fail;
        }

        if (pi.size > CHALLENGE_LEN * 2) {
                conn_err(tconn, "expected AuthChallenge payload too big.\n");
                rv = -1;
                goto fail;
        }

        peers_ch = kmalloc(pi.size, GFP_NOIO);
        if (peers_ch == NULL) {
                conn_err(tconn, "kmalloc of peers_ch failed\n");
                rv = -1;
                goto fail;
        }

        err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
        if (err) {
                rv = 0;
                goto fail;
        }

        resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
        response = kmalloc(resp_size, GFP_NOIO);
        if (response == NULL) {
                conn_err(tconn, "kmalloc of response failed\n");
                rv = -1;
                goto fail;
        }

        sg_init_table(&sg, 1);
        sg_set_buf(&sg, peers_ch, pi.size);

        rv = crypto_hash_digest(&desc, &sg, sg.length, response);
        if (rv) {
                conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
                rv = -1;
                goto fail;
        }

        rv = !conn_send_cmd2(tconn, P_AUTH_RESPONSE, response, resp_size);
        if (!rv)
                goto fail;

        err = drbd_recv_header(tconn, &pi);
        if (err) {
                rv = 0;
                goto fail;
        }

        if (pi.cmd != P_AUTH_RESPONSE) {
                conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
                         cmdname(pi.cmd), pi.cmd);
                rv = 0;
                goto fail;
        }

        if (pi.size != resp_size) {
                conn_err(tconn, "expected AuthResponse payload of wrong size\n");
                rv = 0;
                goto fail;
        }

        err = drbd_recv_all_warn(tconn, response, resp_size);
        if (err) {
                rv = 0;
                goto fail;
        }

        right_response = kmalloc(resp_size, GFP_NOIO);
        if (right_response == NULL) {
                conn_err(tconn, "kmalloc of right_response failed\n");
                rv = -1;
                goto fail;
        }

        sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

        rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
        if (rv) {
                conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
                rv = -1;
                goto fail;
        }

        rv = !memcmp(response, right_response, resp_size);

        if (rv)
                conn_info(tconn, "Peer authenticated using %d bytes of '%s' HMAC\n",
                     resp_size, tconn->net_conf->cram_hmac_alg);
        else
                rv = -1;

 fail:
        kfree(peers_ch);
        kfree(response);
        kfree(right_response);

        return rv;
}
#endif
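
/*
 * Note on the memcmp() above: a plain memcmp() returns as soon as the
 * first differing byte is found, which in principle leaks timing
 * information about the expected HMAC.  On kernels that provide
 * crypto_memneq() (an assumption; it is not used by this file), a
 * constant-time comparison would look like:
 *
 *      rv = !crypto_memneq(response, right_response, resp_size);
 */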
int drbdd_init(struct drbd_thread *thi)
{
        struct drbd_tconn *tconn = thi->tconn;
        int h;

        conn_info(tconn, "receiver (re)started\n");

        do {
                h = drbd_connect(tconn);
                if (h == 0) {
                        drbd_disconnect(tconn);
                        schedule_timeout_interruptible(HZ);
                }
                if (h == -1) {
                        conn_warn(tconn, "Discarding network configuration.\n");
                        conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
                }
        } while (h == 0);

        if (h > 0) {
                if (get_net_conf(tconn)) {
                        drbdd(tconn);
                        put_net_conf(tconn);
                }
        }

        drbd_disconnect(tconn);

        conn_info(tconn, "receiver terminated\n");
        return 0;
}
/* ********* acknowledge sender ******** */
static int got_conn_RqSReply(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
        struct p_req_state_reply *p = tconn->meta.rbuf;
        int retcode = be32_to_cpu(p->retcode);

        if (retcode >= SS_SUCCESS) {
                set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
        } else {
                set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
                conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
                         drbd_set_st_err_str(retcode), retcode);
        }
        wake_up(&tconn->ping_wait);

        return true;
}
static int got_RqSReply(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        struct p_req_state_reply *p = mdev->tconn->meta.rbuf;
        int retcode = be32_to_cpu(p->retcode);

        if (retcode >= SS_SUCCESS) {
                set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
        } else {
                set_bit(CL_ST_CHG_FAIL, &mdev->flags);
                dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
                        drbd_set_st_err_str(retcode), retcode);
        }
        wake_up(&mdev->state_wait);

        return true;
}
static int got_Ping(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
        return drbd_send_ping_ack(tconn);
}
static int got_PingAck(struct drbd_tconn *tconn, enum drbd_packet cmd)
{
        /* restore idle timeout */
        tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
        if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
                wake_up(&tconn->ping_wait);

        return true;
}
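
/*
 * Timeout interplay with drbd_asender(): when a ping is sent, the
 * asender tightens sk_rcvtimeo to ping_timeo*HZ/10 and sets
 * ping_timeout_active; the PingAck arriving here restores the relaxed
 * ping_int idle timeout, and the asender clears ping_timeout_active
 * once it has dispatched this command.
 */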
static int got_IsInSync(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        struct p_block_ack *p = mdev->tconn->meta.rbuf;
        sector_t sector = be64_to_cpu(p->sector);
        int blksize = be32_to_cpu(p->blksize);

        D_ASSERT(mdev->tconn->agreed_pro_version >= 89);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (get_ldev(mdev)) {
                drbd_rs_complete_io(mdev, sector);
                drbd_set_in_sync(mdev, sector, blksize);
                /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
                mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
                put_ldev(mdev);
        }
        dec_rs_pending(mdev);
        atomic_add(blksize >> 9, &mdev->rs_sect_in);

        return true;
}
static bool
validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
                              struct rb_root *root, const char *func,
                              enum drbd_req_event what, bool missing_ok)
{
        struct drbd_request *req;
        struct bio_and_error m;

        spin_lock_irq(&mdev->tconn->req_lock);
        req = find_request(mdev, root, id, sector, missing_ok, func);
        if (unlikely(!req)) {
                spin_unlock_irq(&mdev->tconn->req_lock);
                return false;
        }
        __req_mod(req, what, &m);
        spin_unlock_irq(&mdev->tconn->req_lock);

        if (m.bio)
                complete_master_bio(mdev, &m);
        return true;
}
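
/*
 * Usage pattern (see the callers below): write acks look the request up
 * in mdev->write_requests, read acks in mdev->read_requests, and only
 * got_NegAck() passes missing_ok = true, since in protocols A and B the
 * master bio may have completed before any ack arrives.
 */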
static int got_BlockAck(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        struct p_block_ack *p = mdev->tconn->meta.rbuf;
        sector_t sector = be64_to_cpu(p->sector);
        int blksize = be32_to_cpu(p->blksize);
        enum drbd_req_event what;

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (p->block_id == ID_SYNCER) {
                drbd_set_in_sync(mdev, sector, blksize);
                dec_rs_pending(mdev);
                return true;
        }
        switch (cmd) {
        case P_RS_WRITE_ACK:
                D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
                what = WRITE_ACKED_BY_PEER_AND_SIS;
                break;
        case P_WRITE_ACK:
                D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
                what = WRITE_ACKED_BY_PEER;
                break;
        case P_RECV_ACK:
                D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B);
                what = RECV_ACKED_BY_PEER;
                break;
        case P_DISCARD_WRITE:
                D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
                what = DISCARD_WRITE;
                break;
        case P_RETRY_WRITE:
                D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
                what = POSTPONE_WRITE;
                break;
        default:
                BUG();
        }

        return validate_req_change_req_state(mdev, p->block_id, sector,
                                             &mdev->write_requests, __func__,
                                             what, false);
}
static int got_NegAck(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        struct p_block_ack *p = mdev->tconn->meta.rbuf;
        sector_t sector = be64_to_cpu(p->sector);
        int size = be32_to_cpu(p->blksize);
        bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A ||
                          mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B;
        bool found;

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (p->block_id == ID_SYNCER) {
                dec_rs_pending(mdev);
                drbd_rs_failed_io(mdev, sector, size);
                return true;
        }

        found = validate_req_change_req_state(mdev, p->block_id, sector,
                                              &mdev->write_requests, __func__,
                                              NEG_ACKED, missing_ok);
        if (!found) {
                /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
                   The master bio might already be completed, therefore the
                   request is no longer in the collision hash. */
                /* In Protocol B we might already have got a P_RECV_ACK
                   but then get a P_NEG_ACK afterwards. */
                if (!missing_ok)
                        return false;
                drbd_set_out_of_sync(mdev, sector, size);
        }
        return true;
}
static int got_NegDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        struct p_block_ack *p = mdev->tconn->meta.rbuf;
        sector_t sector = be64_to_cpu(p->sector);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
                (unsigned long long)sector, be32_to_cpu(p->blksize));

        return validate_req_change_req_state(mdev, p->block_id, sector,
                                             &mdev->read_requests, __func__,
                                             NEG_ACKED, false);
}
static int got_NegRSDReply(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        sector_t sector;
        int size;
        struct p_block_ack *p = mdev->tconn->meta.rbuf;

        sector = be64_to_cpu(p->sector);
        size = be32_to_cpu(p->blksize);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        dec_rs_pending(mdev);

        if (get_ldev_if_state(mdev, D_FAILED)) {
                drbd_rs_complete_io(mdev, sector);
                switch (cmd) {
                case P_NEG_RS_DREPLY:
                        drbd_rs_failed_io(mdev, sector, size);
                        /* fall through */
                case P_RS_CANCEL:
                        break;
                default:
                        D_ASSERT(0);
                }
                put_ldev(mdev);
        }

        return true;
}
static int got_BarrierAck(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        struct p_barrier_ack *p = mdev->tconn->meta.rbuf;

        tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));

        if (mdev->state.conn == C_AHEAD &&
            atomic_read(&mdev->ap_in_flight) == 0 &&
            !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
                mdev->start_resync_timer.expires = jiffies + HZ;
                add_timer(&mdev->start_resync_timer);
        }

        return true;
}
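
/*
 * The C_AHEAD condition above is what ends Ahead/Behind mode: once a
 * barrier ack drains ap_in_flight to zero, the AHEAD_TO_SYNC_SOURCE bit
 * guards arming a one-shot timer (one second out) that starts the
 * transition back to resync as sync source.
 */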
static int got_OVResult(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        struct p_block_ack *p = mdev->tconn->meta.rbuf;
        struct drbd_work *w;
        sector_t sector;
        int size;

        sector = be64_to_cpu(p->sector);
        size = be32_to_cpu(p->blksize);

        update_peer_seq(mdev, be32_to_cpu(p->seq_num));

        if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
                drbd_ov_out_of_sync_found(mdev, sector, size);
        else
                ov_out_of_sync_print(mdev);

        if (!get_ldev(mdev))
                return true;

        drbd_rs_complete_io(mdev, sector);
        dec_rs_pending(mdev);

        --mdev->ov_left;

        /* let's advance progress step marks only for every other megabyte */
        if ((mdev->ov_left & 0x200) == 0x200)
                drbd_advance_rs_marks(mdev, mdev->ov_left);

        if (mdev->ov_left == 0) {
                w = kmalloc(sizeof(*w), GFP_NOIO);
                if (w) {
                        w->cb = w_ov_finished;
                        w->mdev = mdev;
                        drbd_queue_work_front(&mdev->tconn->data.work, w);
                } else {
                        dev_err(DEV, "kmalloc(w) failed.");
                        ov_out_of_sync_print(mdev);
                        drbd_resync_finished(mdev);
                }
        }
        put_ldev(mdev);
        return true;
}
static int got_skip(struct drbd_conf *mdev, enum drbd_packet cmd)
{
        return true;
}
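
/*
 * got_skip() acknowledges receipt and deliberately does nothing; it is
 * the handler for commands we accept but ignore on this side, e.g.
 * P_DELAY_PROBE in asender_tbl[] below.
 */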
static int tconn_process_done_ee(struct drbd_tconn *tconn)
{
        struct drbd_conf *mdev;
        int i, not_empty = 0;

        do {
                clear_bit(SIGNAL_ASENDER, &tconn->flags);
                flush_signals(current);
                idr_for_each_entry(&tconn->volumes, mdev, i) {
                        if (drbd_process_done_ee(mdev))
                                return 1; /* error */
                }
                set_bit(SIGNAL_ASENDER, &tconn->flags);

                spin_lock_irq(&tconn->req_lock);
                idr_for_each_entry(&tconn->volumes, mdev, i) {
                        not_empty = !list_empty(&mdev->done_ee);
                        if (not_empty)
                                break;
                }
                spin_unlock_irq(&tconn->req_lock);
        } while (not_empty);

        return 0;
}
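
/*
 * The SIGNAL_ASENDER choreography above: other threads wake the asender
 * out of a blocking receive by signalling it, but only while this bit
 * is set.  Clearing the bit and flushing pending signals first keeps
 * the socket sends issued by drbd_process_done_ee() from being
 * interrupted by such wakeups; the recheck of done_ee under req_lock
 * afterwards closes the race with work queued while the bit was clear.
 */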
struct asender_cmd {
        size_t pkt_size;
        enum mdev_or_conn fa_type; /* first argument's type */
        union {
                int (*mdev_fn)(struct drbd_conf *mdev, enum drbd_packet cmd);
                int (*conn_fn)(struct drbd_tconn *tconn, enum drbd_packet cmd);
        };
};
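
/*
 * fa_type selects the valid union member: CONN entries are handed the
 * whole connection (e.g. got_Ping), while MDEV entries are resolved to
 * a specific volume via vnr_to_mdev() in drbd_asender() before the
 * callback runs.
 */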
static struct asender_cmd asender_tbl[] = {
        [P_PING]            = { sizeof(struct p_header), CONN, { .conn_fn = got_Ping } },
        [P_PING_ACK]        = { sizeof(struct p_header), CONN, { .conn_fn = got_PingAck } },
        [P_RECV_ACK]        = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
        [P_WRITE_ACK]       = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
        [P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
        [P_DISCARD_WRITE]   = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
        [P_NEG_ACK]         = { sizeof(struct p_block_ack), MDEV, { got_NegAck } },
        [P_NEG_DREPLY]      = { sizeof(struct p_block_ack), MDEV, { got_NegDReply } },
        [P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), MDEV, { got_NegRSDReply } },
        [P_OV_RESULT]       = { sizeof(struct p_block_ack), MDEV, { got_OVResult } },
        [P_BARRIER_ACK]     = { sizeof(struct p_barrier_ack), MDEV, { got_BarrierAck } },
        [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), MDEV, { got_RqSReply } },
        [P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), MDEV, { got_IsInSync } },
        [P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), MDEV, { got_skip } },
        [P_RS_CANCEL]       = { sizeof(struct p_block_ack), MDEV, { got_NegRSDReply } },
        [P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), CONN, { .conn_fn = got_conn_RqSReply } },
        [P_RETRY_WRITE]     = { sizeof(struct p_block_ack), MDEV, { got_BlockAck } },
};
int drbd_asender(struct drbd_thread *thi)
{
        struct drbd_tconn *tconn = thi->tconn;
        struct p_header *h = tconn->meta.rbuf;
        struct asender_cmd *cmd = NULL;
        struct packet_info pi;
        int rv;
        void *buf    = h;
        int received = 0;
        int expect   = sizeof(struct p_header);
        int ping_timeout_active = 0;

        current->policy = SCHED_RR;  /* Make this a realtime task! */
        current->rt_priority = 2;    /* more important than all other tasks */

        while (get_t_state(thi) == RUNNING) {
                drbd_thread_current_set_cpu(thi);
                if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
                        if (!drbd_send_ping(tconn)) {
                                conn_err(tconn, "drbd_send_ping has failed\n");
                                goto reconnect;
                        }
                        tconn->meta.socket->sk->sk_rcvtimeo =
                                tconn->net_conf->ping_timeo*HZ/10;
                        ping_timeout_active = 1;
                }

                /* TODO: conditionally cork; it may hurt latency if we cork without
                   much to send */
                if (!tconn->net_conf->no_cork)
                        drbd_tcp_cork(tconn->meta.socket);
                if (tconn_process_done_ee(tconn)) {
                        conn_err(tconn, "tconn_process_done_ee() failed\n");
                        goto reconnect;
                }
                /* but unconditionally uncork unless disabled */
                if (!tconn->net_conf->no_cork)
                        drbd_tcp_uncork(tconn->meta.socket);

                /* short circuit, recv_msg would return EINTR anyways. */
                if (signal_pending(current))
                        continue;

                rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
                clear_bit(SIGNAL_ASENDER, &tconn->flags);

                flush_signals(current);

                /* Note:
                 * -EINTR        (on meta) we got a signal
                 * -EAGAIN       (on meta) rcvtimeo expired
                 * -ECONNRESET   other side closed the connection
                 * -ERESTARTSYS  (on data) we got a signal
                 * rv <  0       other than above: unexpected error!
                 * rv == expected: full header or command
                 * rv <  expected: "woken" by signal during receive
                 * rv == 0       : "connection shut down by peer"
                 */
                if (likely(rv > 0)) {
                        received += rv;
                        buf      += rv;
                } else if (rv == 0) {
                        conn_err(tconn, "meta connection shut down by peer.\n");
                        goto reconnect;
                } else if (rv == -EAGAIN) {
                        /* If the data socket received something meanwhile,
                         * that is good enough: peer is still alive. */
                        if (time_after(tconn->last_received,
                                jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
                                continue;
                        if (ping_timeout_active) {
                                conn_err(tconn, "PingAck did not arrive in time.\n");
                                goto reconnect;
                        }
                        set_bit(SEND_PING, &tconn->flags);
                        continue;
                } else if (rv == -EINTR) {
                        continue;
                } else {
                        conn_err(tconn, "sock_recvmsg returned %d\n", rv);
                        goto reconnect;
                }

                if (received == expect && cmd == NULL) {
                        if (decode_header(tconn, h, &pi))
                                goto reconnect;
                        /* bounds-check pi.cmd before indexing the sparse table;
                         * unpopulated entries have a NULL handler */
                        if (pi.cmd >= ARRAY_SIZE(asender_tbl) ||
                            !asender_tbl[pi.cmd].conn_fn) {
                                conn_err(tconn, "unknown command %d on meta (l: %d)\n",
                                         pi.cmd, pi.size);
                                goto disconnect;
                        }
                        cmd = &asender_tbl[pi.cmd];
                        expect = cmd->pkt_size;
                        if (pi.size != expect - sizeof(struct p_header)) {
                                conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
                                         pi.cmd, pi.size);
                                goto reconnect;
                        }
                }
                if (received == expect) {
                        if (cmd->fa_type == CONN) {
                                rv = cmd->conn_fn(tconn, pi.cmd);
                        } else {
                                struct drbd_conf *mdev = vnr_to_mdev(tconn, pi.vnr);
                                rv = cmd->mdev_fn(mdev, pi.cmd);
                        }

                        if (!rv)
                                goto reconnect;

                        tconn->last_received = jiffies;

                        /* the idle_timeout (ping-int)
                         * has been restored in got_PingAck() */
                        if (cmd == &asender_tbl[P_PING_ACK])
                                ping_timeout_active = 0;

                        buf      = h;
                        received = 0;
                        expect   = sizeof(struct p_header);
                        cmd      = NULL;
                }
        }

        if (0) {
reconnect:
                conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
        }
        if (0) {
disconnect:
                conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
        }
        clear_bit(SIGNAL_ASENDER, &tconn->flags);

        conn_info(tconn, "asender terminated\n");

        return 0;
}