drbd: Now we need to handle the ed_uuid of a diskless, unconnected primary correctly
drivers/block/drbd/drbd_receiver.c
1 /*
2 drbd_receiver.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
26 #include <linux/module.h>
27
28 #include <asm/uaccess.h>
29 #include <net/sock.h>
30
31 #include <linux/drbd.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/in.h>
35 #include <linux/mm.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/smp_lock.h>
40 #include <linux/pkt_sched.h>
41 #define __KERNEL_SYSCALLS__
42 #include <linux/unistd.h>
43 #include <linux/vmalloc.h>
44 #include <linux/random.h>
45 #include <linux/string.h>
46 #include <linux/scatterlist.h>
47 #include "drbd_int.h"
48 #include "drbd_req.h"
49
50 #include "drbd_vli.h"
51
52 struct flush_work {
53 struct drbd_work w;
54 struct drbd_epoch *epoch;
55 };
56
57 enum finish_epoch {
58 FE_STILL_LIVE,
59 FE_DESTROYED,
60 FE_RECYCLED,
61 };
62
63 static int drbd_do_handshake(struct drbd_conf *mdev);
64 static int drbd_do_auth(struct drbd_conf *mdev);
65
66 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
67 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
68
69 static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
70 {
71 struct drbd_epoch *prev;
72 spin_lock(&mdev->epoch_lock);
73 prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
74 if (prev == epoch || prev == mdev->current_epoch)
75 prev = NULL;
76 spin_unlock(&mdev->epoch_lock);
77 return prev;
78 }
79
80 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
81
82 /*
  83  * some helper functions to deal with singly linked page lists,
84 * page->private being our "next" pointer.
85 */
86
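/* For reference, a minimal sketch of the accessors these helpers rely on
 * (the authoritative definitions live in drbd_int.h):
 *
 *	static inline struct page *page_chain_next(struct page *page)
 *	{
 *		return (struct page *)page_private(page);
 *	}
 *
 *	#define page_chain_for_each(page) \
 *		for (; page; page = page_chain_next(page))
 */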
87 /* If at least n pages are linked at head, get n pages off.
88 * Otherwise, don't modify head, and return NULL.
89 * Locking is the responsibility of the caller.
90 */
91 static struct page *page_chain_del(struct page **head, int n)
92 {
93 struct page *page;
94 struct page *tmp;
95
96 BUG_ON(!n);
97 BUG_ON(!head);
98
99 page = *head;
100
101 if (!page)
102 return NULL;
103
104 while (page) {
105 tmp = page_chain_next(page);
106 if (--n == 0)
107 break; /* found sufficient pages */
108 if (tmp == NULL)
109 /* insufficient pages, don't use any of them. */
110 return NULL;
111 page = tmp;
112 }
113
114 /* add end of list marker for the returned list */
115 set_page_private(page, 0);
116 /* actual return value, and adjustment of head */
117 page = *head;
118 *head = tmp;
119 return page;
120 }
121
122 /* may be used outside of locks to find the tail of a (usually short)
123 * "private" page chain, before adding it back to a global chain head
124 * with page_chain_add() under a spinlock. */
125 static struct page *page_chain_tail(struct page *page, int *len)
126 {
127 struct page *tmp;
128 int i = 1;
129 while ((tmp = page_chain_next(page)))
130 ++i, page = tmp;
131 if (len)
132 *len = i;
133 return page;
134 }
135
136 static int page_chain_free(struct page *page)
137 {
138 struct page *tmp;
139 int i = 0;
140 page_chain_for_each_safe(page, tmp) {
141 put_page(page);
142 ++i;
143 }
144 return i;
145 }
146
147 static void page_chain_add(struct page **head,
148 struct page *chain_first, struct page *chain_last)
149 {
150 #if 1
151 struct page *tmp;
152 tmp = page_chain_tail(chain_first, NULL);
153 BUG_ON(tmp != chain_last);
154 #endif
155
156 /* add chain to head */
157 set_page_private(chain_last, (unsigned long)*head);
158 *head = chain_first;
159 }
160
161 static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
162 {
163 struct page *page = NULL;
164 struct page *tmp = NULL;
165 int i = 0;
166
167 /* Yes, testing drbd_pp_vacant outside the lock is racy.
168 * So what. It saves a spin_lock. */
169 if (drbd_pp_vacant >= number) {
170 spin_lock(&drbd_pp_lock);
171 page = page_chain_del(&drbd_pp_pool, number);
172 if (page)
173 drbd_pp_vacant -= number;
174 spin_unlock(&drbd_pp_lock);
175 if (page)
176 return page;
177 }
178
179 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
180 * "criss-cross" setup, that might cause write-out on some other DRBD,
181 * which in turn might block on the other node at this very place. */
182 for (i = 0; i < number; i++) {
183 tmp = alloc_page(GFP_TRY);
184 if (!tmp)
185 break;
186 set_page_private(tmp, (unsigned long)page);
187 page = tmp;
188 }
189
190 if (i == number)
191 return page;
192
193 /* Not enough pages immediately available this time.
194 * No need to jump around here, drbd_pp_alloc will retry this
195 * function "soon". */
196 if (page) {
197 tmp = page_chain_tail(page, NULL);
198 spin_lock(&drbd_pp_lock);
199 page_chain_add(&drbd_pp_pool, page, tmp);
200 drbd_pp_vacant += i;
201 spin_unlock(&drbd_pp_lock);
202 }
203 return NULL;
204 }
205
206 /* kick lower level device, if we have more than (arbitrary number)
207 * reference counts on it, which typically are locally submitted io
208 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
209 static void maybe_kick_lo(struct drbd_conf *mdev)
210 {
211 if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
212 drbd_kick_lo(mdev);
213 }
214
215 static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
216 {
217 struct drbd_epoch_entry *e;
218 struct list_head *le, *tle;
219
220 /* The EEs are always appended to the end of the list. Since
221 they are sent in order over the wire, they have to finish
 222 	   in order. As soon as we see the first unfinished one, we can
 223 	   stop examining the list... */
224
225 list_for_each_safe(le, tle, &mdev->net_ee) {
226 e = list_entry(le, struct drbd_epoch_entry, w.list);
227 if (drbd_ee_has_active_page(e))
228 break;
229 list_move(le, to_be_freed);
230 }
231 }
232
233 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
234 {
235 LIST_HEAD(reclaimed);
236 struct drbd_epoch_entry *e, *t;
237
238 maybe_kick_lo(mdev);
239 spin_lock_irq(&mdev->req_lock);
240 reclaim_net_ee(mdev, &reclaimed);
241 spin_unlock_irq(&mdev->req_lock);
242
243 list_for_each_entry_safe(e, t, &reclaimed, w.list)
244 drbd_free_ee(mdev, e);
245 }
246
247 /**
248 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
249 * @mdev: DRBD device.
250 * @number: number of pages requested
251 * @retry: whether to retry, if not enough pages are available right now
252 *
 253  * Tries to allocate @number pages, first from our own page pool, then from
254 * the kernel, unless this allocation would exceed the max_buffers setting.
255 * Possibly retry until DRBD frees sufficient pages somewhere else.
256 *
257 * Returns a page chain linked via page->private.
258 */
259 static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
260 {
261 struct page *page = NULL;
262 DEFINE_WAIT(wait);
263
264 /* Yes, we may run up to @number over max_buffers. If we
265 * follow it strictly, the admin will get it wrong anyways. */
266 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
267 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
268
269 while (page == NULL) {
270 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
271
272 drbd_kick_lo_and_reclaim_net(mdev);
273
274 if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
275 page = drbd_pp_first_pages_or_try_alloc(mdev, number);
276 if (page)
277 break;
278 }
279
280 if (!retry)
281 break;
282
283 if (signal_pending(current)) {
284 dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
285 break;
286 }
287
288 schedule();
289 }
290 finish_wait(&drbd_pp_wait, &wait);
291
292 if (page)
293 atomic_add(number, &mdev->pp_in_use);
294 return page;
295 }
296
297 /* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 298  * Is also used from inside another spin_lock_irq(&mdev->req_lock);
299 * Either links the page chain back to the global pool,
300 * or returns all pages to the system. */
301 static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
302 {
303 int i;
304 if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
305 i = page_chain_free(page);
306 else {
307 struct page *tmp;
308 tmp = page_chain_tail(page, &i);
309 spin_lock(&drbd_pp_lock);
310 page_chain_add(&drbd_pp_pool, page, tmp);
311 drbd_pp_vacant += i;
312 spin_unlock(&drbd_pp_lock);
313 }
314 atomic_sub(i, &mdev->pp_in_use);
315 i = atomic_read(&mdev->pp_in_use);
316 if (i < 0)
317 dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
318 wake_up(&drbd_pp_wait);
319 }
320
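/* Typical alloc/free pairing, as done by drbd_alloc_ee()/drbd_free_ee()
 * below (sketch only; nr_pages is derived from the request size):
 *
 *	struct page *chain = drbd_pp_alloc(mdev, nr_pages, 1);
 *	if (chain) {
 *		... receive data into the chain ...
 *		drbd_pp_free(mdev, chain);
 *	}
 */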
321 /*
322 You need to hold the req_lock:
323 _drbd_wait_ee_list_empty()
324
325 You must not have the req_lock:
326 drbd_free_ee()
327 drbd_alloc_ee()
328 drbd_init_ee()
329 drbd_release_ee()
330 drbd_ee_fix_bhs()
331 drbd_process_done_ee()
332 drbd_clear_done_ee()
333 drbd_wait_ee_list_empty()
334 */
335
336 struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
337 u64 id,
338 sector_t sector,
339 unsigned int data_size,
340 gfp_t gfp_mask) __must_hold(local)
341 {
342 struct drbd_epoch_entry *e;
343 struct page *page;
344 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
345
346 if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
347 return NULL;
348
349 e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
350 if (!e) {
351 if (!(gfp_mask & __GFP_NOWARN))
352 dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
353 return NULL;
354 }
355
356 page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
357 if (!page)
358 goto fail;
359
360 INIT_HLIST_NODE(&e->colision);
361 e->epoch = NULL;
362 e->mdev = mdev;
363 e->pages = page;
364 atomic_set(&e->pending_bios, 0);
365 e->size = data_size;
366 e->flags = 0;
 367 	e->sector = sector;
369 e->block_id = id;
370
371 return e;
372
373 fail:
374 mempool_free(e, drbd_ee_mempool);
375 return NULL;
376 }
377
378 void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
379 {
380 drbd_pp_free(mdev, e->pages);
381 D_ASSERT(atomic_read(&e->pending_bios) == 0);
382 D_ASSERT(hlist_unhashed(&e->colision));
383 mempool_free(e, drbd_ee_mempool);
384 }
385
386 int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
387 {
388 LIST_HEAD(work_list);
389 struct drbd_epoch_entry *e, *t;
390 int count = 0;
391
392 spin_lock_irq(&mdev->req_lock);
393 list_splice_init(list, &work_list);
394 spin_unlock_irq(&mdev->req_lock);
395
396 list_for_each_entry_safe(e, t, &work_list, w.list) {
397 drbd_free_ee(mdev, e);
398 count++;
399 }
400 return count;
401 }
402
403
404 /*
405 * This function is called from _asender only_
406 * but see also comments in _req_mod(,barrier_acked)
407 * and receive_Barrier.
408 *
409 * Move entries from net_ee to done_ee, if ready.
410 * Grab done_ee, call all callbacks, free the entries.
411 * The callbacks typically send out ACKs.
412 */
413 static int drbd_process_done_ee(struct drbd_conf *mdev)
414 {
415 LIST_HEAD(work_list);
416 LIST_HEAD(reclaimed);
417 struct drbd_epoch_entry *e, *t;
418 int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
419
420 spin_lock_irq(&mdev->req_lock);
421 reclaim_net_ee(mdev, &reclaimed);
422 list_splice_init(&mdev->done_ee, &work_list);
423 spin_unlock_irq(&mdev->req_lock);
424
425 list_for_each_entry_safe(e, t, &reclaimed, w.list)
426 drbd_free_ee(mdev, e);
427
428 /* possible callbacks here:
 429 	 * e_end_block, e_end_resync_block, and e_send_discard_ack.
430 * all ignore the last argument.
431 */
432 list_for_each_entry_safe(e, t, &work_list, w.list) {
433 /* list_del not necessary, next/prev members not touched */
434 ok = e->w.cb(mdev, &e->w, !ok) && ok;
435 drbd_free_ee(mdev, e);
436 }
437 wake_up(&mdev->ee_wait);
438
439 return ok;
440 }
441
442 void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
443 {
444 DEFINE_WAIT(wait);
445
446 /* avoids spin_lock/unlock
447 * and calling prepare_to_wait in the fast path */
448 while (!list_empty(head)) {
449 prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
450 spin_unlock_irq(&mdev->req_lock);
451 drbd_kick_lo(mdev);
452 schedule();
453 finish_wait(&mdev->ee_wait, &wait);
454 spin_lock_irq(&mdev->req_lock);
455 }
456 }
457
458 void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
459 {
460 spin_lock_irq(&mdev->req_lock);
461 _drbd_wait_ee_list_empty(mdev, head);
462 spin_unlock_irq(&mdev->req_lock);
463 }
464
 465 /* see also kernel_accept(), which is only present since 2.6.18.
 466  * Also, we want to log exactly which part of it failed. */
467 static int drbd_accept(struct drbd_conf *mdev, const char **what,
468 struct socket *sock, struct socket **newsock)
469 {
470 struct sock *sk = sock->sk;
471 int err = 0;
472
473 *what = "listen";
474 err = sock->ops->listen(sock, 5);
475 if (err < 0)
476 goto out;
477
478 *what = "sock_create_lite";
479 err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
480 newsock);
481 if (err < 0)
482 goto out;
483
484 *what = "accept";
485 err = sock->ops->accept(sock, *newsock, 0);
486 if (err < 0) {
487 sock_release(*newsock);
488 *newsock = NULL;
489 goto out;
490 }
491 (*newsock)->ops = sock->ops;
492
493 out:
494 return err;
495 }
496
497 static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
498 void *buf, size_t size, int flags)
499 {
500 mm_segment_t oldfs;
501 struct kvec iov = {
502 .iov_base = buf,
503 .iov_len = size,
504 };
505 struct msghdr msg = {
506 .msg_iovlen = 1,
507 .msg_iov = (struct iovec *)&iov,
508 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
509 };
510 int rv;
511
512 oldfs = get_fs();
513 set_fs(KERNEL_DS);
514 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
515 set_fs(oldfs);
516
517 return rv;
518 }
519
520 static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
521 {
522 mm_segment_t oldfs;
523 struct kvec iov = {
524 .iov_base = buf,
525 .iov_len = size,
526 };
527 struct msghdr msg = {
528 .msg_iovlen = 1,
529 .msg_iov = (struct iovec *)&iov,
530 .msg_flags = MSG_WAITALL | MSG_NOSIGNAL
531 };
532 int rv;
533
534 oldfs = get_fs();
535 set_fs(KERNEL_DS);
536
537 for (;;) {
538 rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
539 if (rv == size)
540 break;
541
542 /* Note:
543 * ECONNRESET other side closed the connection
544 * ERESTARTSYS (on sock) we got a signal
545 */
546
547 if (rv < 0) {
548 if (rv == -ECONNRESET)
549 dev_info(DEV, "sock was reset by peer\n");
550 else if (rv != -ERESTARTSYS)
551 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
552 break;
553 } else if (rv == 0) {
554 dev_info(DEV, "sock was shut down by peer\n");
555 break;
556 } else {
557 /* signal came in, or peer/link went down,
558 * after we read a partial message
559 */
560 /* D_ASSERT(signal_pending(current)); */
561 break;
562 }
 563 	}
564
565 set_fs(oldfs);
566
567 if (rv != size)
568 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
569
570 return rv;
571 }
572
573 /* quoting tcp(7):
574 * On individual connections, the socket buffer size must be set prior to the
575 * listen(2) or connect(2) calls in order to have it take effect.
576 * This is our wrapper to do so.
577 */
578 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
579 unsigned int rcv)
580 {
581 /* open coded SO_SNDBUF, SO_RCVBUF */
582 if (snd) {
583 sock->sk->sk_sndbuf = snd;
584 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
585 }
586 if (rcv) {
587 sock->sk->sk_rcvbuf = rcv;
588 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
589 }
590 }
591
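/* This open coding mirrors what userspace would do with setsockopt(2)
 * before the connect()/listen() call, e.g. (hypothetical buffer size):
 *
 *	int snd = 128 << 10;
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
 */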
592 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
593 {
594 const char *what;
595 struct socket *sock;
596 struct sockaddr_in6 src_in6;
597 int err;
598 int disconnect_on_error = 1;
599
600 if (!get_net_conf(mdev))
601 return NULL;
602
603 what = "sock_create_kern";
604 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
605 SOCK_STREAM, IPPROTO_TCP, &sock);
606 if (err < 0) {
607 sock = NULL;
608 goto out;
609 }
610
611 sock->sk->sk_rcvtimeo =
612 sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
613 drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
614 mdev->net_conf->rcvbuf_size);
615
616 /* explicitly bind to the configured IP as source IP
617 * for the outgoing connections.
618 * This is needed for multihomed hosts and to be
619 * able to use lo: interfaces for drbd.
620 * Make sure to use 0 as port number, so linux selects
621 * a free one dynamically.
622 */
623 memcpy(&src_in6, mdev->net_conf->my_addr,
624 min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
625 if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
626 src_in6.sin6_port = 0;
627 else
628 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
629
630 what = "bind before connect";
631 err = sock->ops->bind(sock,
632 (struct sockaddr *) &src_in6,
633 mdev->net_conf->my_addr_len);
634 if (err < 0)
635 goto out;
636
637 /* connect may fail, peer not yet available.
638 * stay C_WF_CONNECTION, don't go Disconnecting! */
639 disconnect_on_error = 0;
640 what = "connect";
641 err = sock->ops->connect(sock,
642 (struct sockaddr *)mdev->net_conf->peer_addr,
643 mdev->net_conf->peer_addr_len, 0);
644
645 out:
646 if (err < 0) {
647 if (sock) {
648 sock_release(sock);
649 sock = NULL;
650 }
651 switch (-err) {
652 /* timeout, busy, signal pending */
653 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
654 case EINTR: case ERESTARTSYS:
655 /* peer not (yet) available, network problem */
656 case ECONNREFUSED: case ENETUNREACH:
657 case EHOSTDOWN: case EHOSTUNREACH:
658 disconnect_on_error = 0;
659 break;
660 default:
661 dev_err(DEV, "%s failed, err = %d\n", what, err);
662 }
663 if (disconnect_on_error)
664 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
665 }
666 put_net_conf(mdev);
667 return sock;
668 }
669
670 static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
671 {
672 int timeo, err;
673 struct socket *s_estab = NULL, *s_listen;
674 const char *what;
675
676 if (!get_net_conf(mdev))
677 return NULL;
678
679 what = "sock_create_kern";
680 err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
681 SOCK_STREAM, IPPROTO_TCP, &s_listen);
682 if (err) {
683 s_listen = NULL;
684 goto out;
685 }
686
687 timeo = mdev->net_conf->try_connect_int * HZ;
688 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
689
690 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
691 s_listen->sk->sk_rcvtimeo = timeo;
692 s_listen->sk->sk_sndtimeo = timeo;
693 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
694 mdev->net_conf->rcvbuf_size);
695
696 what = "bind before listen";
697 err = s_listen->ops->bind(s_listen,
698 (struct sockaddr *) mdev->net_conf->my_addr,
699 mdev->net_conf->my_addr_len);
700 if (err < 0)
701 goto out;
702
703 err = drbd_accept(mdev, &what, s_listen, &s_estab);
704
705 out:
706 if (s_listen)
707 sock_release(s_listen);
708 if (err < 0) {
709 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
710 dev_err(DEV, "%s failed, err = %d\n", what, err);
711 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
712 }
713 }
714 put_net_conf(mdev);
715
716 return s_estab;
717 }
718
719 static int drbd_send_fp(struct drbd_conf *mdev,
720 struct socket *sock, enum drbd_packets cmd)
721 {
722 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
723
724 return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
725 }
726
727 static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
728 {
729 struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
730 int rr;
731
732 rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);
733
734 if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
735 return be16_to_cpu(h->command);
736
737 return 0xffff;
738 }
739
740 /**
741 * drbd_socket_okay() - Free the socket if its connection is not okay
742 * @mdev: DRBD device.
743 * @sock: pointer to the pointer to the socket.
744 */
745 static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
746 {
747 int rr;
748 char tb[4];
749
750 if (!*sock)
751 return FALSE;
752
753 rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
754
755 if (rr > 0 || rr == -EAGAIN) {
756 return TRUE;
757 } else {
758 sock_release(*sock);
759 *sock = NULL;
760 return FALSE;
761 }
762 }
763
764 /*
765 * return values:
766 * 1 yes, we have a valid connection
767 * 0 oops, did not work out, please try again
768 * -1 peer talks different language,
769 * no point in trying again, please go standalone.
770 * -2 We do not have a network config...
771 */
772 static int drbd_connect(struct drbd_conf *mdev)
773 {
774 struct socket *s, *sock, *msock;
775 int try, h, ok;
776
777 D_ASSERT(!mdev->data.socket);
778
779 if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
780 return -2;
781
782 clear_bit(DISCARD_CONCURRENT, &mdev->flags);
783
784 sock = NULL;
785 msock = NULL;
786
787 do {
788 for (try = 0;;) {
789 /* 3 tries, this should take less than a second! */
790 s = drbd_try_connect(mdev);
791 if (s || ++try >= 3)
792 break;
793 /* give the other side time to call bind() & listen() */
794 __set_current_state(TASK_INTERRUPTIBLE);
795 schedule_timeout(HZ / 10);
796 }
797
798 if (s) {
799 if (!sock) {
800 drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
801 sock = s;
802 s = NULL;
803 } else if (!msock) {
804 drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
805 msock = s;
806 s = NULL;
807 } else {
808 dev_err(DEV, "Logic error in drbd_connect()\n");
809 goto out_release_sockets;
810 }
811 }
812
813 if (sock && msock) {
814 __set_current_state(TASK_INTERRUPTIBLE);
815 schedule_timeout(HZ / 10);
816 ok = drbd_socket_okay(mdev, &sock);
817 ok = drbd_socket_okay(mdev, &msock) && ok;
818 if (ok)
819 break;
820 }
821
822 retry:
823 s = drbd_wait_for_connect(mdev);
824 if (s) {
825 try = drbd_recv_fp(mdev, s);
826 drbd_socket_okay(mdev, &sock);
827 drbd_socket_okay(mdev, &msock);
828 switch (try) {
829 case P_HAND_SHAKE_S:
830 if (sock) {
831 dev_warn(DEV, "initial packet S crossed\n");
832 sock_release(sock);
833 }
834 sock = s;
835 break;
836 case P_HAND_SHAKE_M:
837 if (msock) {
838 dev_warn(DEV, "initial packet M crossed\n");
839 sock_release(msock);
840 }
841 msock = s;
842 set_bit(DISCARD_CONCURRENT, &mdev->flags);
843 break;
844 default:
845 dev_warn(DEV, "Error receiving initial packet\n");
846 sock_release(s);
847 if (random32() & 1)
848 goto retry;
849 }
850 }
851
852 if (mdev->state.conn <= C_DISCONNECTING)
853 goto out_release_sockets;
854 if (signal_pending(current)) {
855 flush_signals(current);
856 smp_rmb();
857 if (get_t_state(&mdev->receiver) == Exiting)
858 goto out_release_sockets;
859 }
860
861 if (sock && msock) {
862 ok = drbd_socket_okay(mdev, &sock);
863 ok = drbd_socket_okay(mdev, &msock) && ok;
864 if (ok)
865 break;
866 }
867 } while (1);
868
869 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
870 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */
871
872 sock->sk->sk_allocation = GFP_NOIO;
873 msock->sk->sk_allocation = GFP_NOIO;
874
875 sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
876 msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
877
878 /* NOT YET ...
879 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
880 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
881 * first set it to the P_HAND_SHAKE timeout,
882 * which we set to 4x the configured ping_timeout. */
883 sock->sk->sk_sndtimeo =
884 sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;
885
886 msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
887 msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
888
889 /* we don't want delays.
 890 	 * we use TCP_CORK where appropriate, though */
891 drbd_tcp_nodelay(sock);
892 drbd_tcp_nodelay(msock);
893
894 mdev->data.socket = sock;
895 mdev->meta.socket = msock;
896 mdev->last_received = jiffies;
897
898 D_ASSERT(mdev->asender.task == NULL);
899
900 h = drbd_do_handshake(mdev);
901 if (h <= 0)
902 return h;
903
904 if (mdev->cram_hmac_tfm) {
905 /* drbd_request_state(mdev, NS(conn, WFAuth)); */
906 switch (drbd_do_auth(mdev)) {
907 case -1:
908 dev_err(DEV, "Authentication of peer failed\n");
909 return -1;
910 case 0:
911 dev_err(DEV, "Authentication of peer failed, trying again.\n");
912 return 0;
913 }
914 }
915
916 if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
917 return 0;
918
919 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
920 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
921
922 atomic_set(&mdev->packet_seq, 0);
923 mdev->peer_seq = 0;
924
925 drbd_thread_start(&mdev->asender);
926
927 if (!drbd_send_protocol(mdev))
928 return -1;
929 drbd_send_sync_param(mdev, &mdev->sync_conf);
930 drbd_send_sizes(mdev, 0, 0);
931 drbd_send_uuids(mdev);
932 drbd_send_state(mdev);
933 clear_bit(USE_DEGR_WFC_T, &mdev->flags);
934 clear_bit(RESIZE_PENDING, &mdev->flags);
935
936 return 1;
937
938 out_release_sockets:
939 if (sock)
940 sock_release(sock);
941 if (msock)
942 sock_release(msock);
943 return -1;
944 }
945
946 static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
947 {
948 int r;
949
950 r = drbd_recv(mdev, h, sizeof(*h));
951
952 if (unlikely(r != sizeof(*h))) {
953 dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
954 return FALSE;
 955 	}
956 h->command = be16_to_cpu(h->command);
957 h->length = be16_to_cpu(h->length);
958 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
959 dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
960 (long)be32_to_cpu(h->magic),
961 h->command, h->length);
962 return FALSE;
963 }
964 mdev->last_received = jiffies;
965
966 return TRUE;
967 }
968
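/* Wire layout decoded above; a sketch of the header, all fields sent in
 * big endian byte order (the authoritative struct p_header lives in
 * drbd_int.h):
 *
 *	struct p_header {
 *		u32 magic;	(BE_DRBD_MAGIC)
 *		u16 command;	(enum drbd_packets)
 *		u16 length;	(payload bytes following this header)
 *		u8  payload[0];
 *	};
 */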
969 static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
970 {
971 int rv;
972
973 if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
974 rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
975 NULL, BLKDEV_IFL_WAIT);
976 if (rv) {
977 dev_err(DEV, "local disk flush failed with status %d\n", rv);
978 /* would rather check on EOPNOTSUPP, but that is not reliable.
979 * don't try again for ANY return value != 0
980 * if (rv == -EOPNOTSUPP) */
981 drbd_bump_write_ordering(mdev, WO_drain_io);
982 }
983 put_ldev(mdev);
984 }
985
986 return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
987 }
988
989 static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
990 {
991 struct flush_work *fw = (struct flush_work *)w;
992 struct drbd_epoch *epoch = fw->epoch;
993
994 kfree(w);
995
996 if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
997 drbd_flush_after_epoch(mdev, epoch);
998
999 drbd_may_finish_epoch(mdev, epoch, EV_PUT |
1000 (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
1001
1002 return 1;
1003 }
1004
1005 /**
1006 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
1007 * @mdev: DRBD device.
1008 * @epoch: Epoch object.
1009 * @ev: Epoch event.
1010 */
1011 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
1012 struct drbd_epoch *epoch,
1013 enum epoch_event ev)
1014 {
1015 int finish, epoch_size;
1016 struct drbd_epoch *next_epoch;
1017 int schedule_flush = 0;
1018 enum finish_epoch rv = FE_STILL_LIVE;
1019
1020 spin_lock(&mdev->epoch_lock);
1021 do {
1022 next_epoch = NULL;
1023 finish = 0;
1024
1025 epoch_size = atomic_read(&epoch->epoch_size);
1026
1027 switch (ev & ~EV_CLEANUP) {
1028 case EV_PUT:
1029 atomic_dec(&epoch->active);
1030 break;
1031 case EV_GOT_BARRIER_NR:
1032 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1033
1034 /* Special case: If we just switched from WO_bio_barrier to
1035 WO_bdev_flush we should not finish the current epoch */
1036 if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
1037 mdev->write_ordering != WO_bio_barrier &&
1038 epoch == mdev->current_epoch)
1039 clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
1040 break;
1041 case EV_BARRIER_DONE:
1042 set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
1043 break;
1044 case EV_BECAME_LAST:
1045 		/* nothing to do */
1046 break;
1047 }
1048
1049 if (epoch_size != 0 &&
1050 atomic_read(&epoch->active) == 0 &&
1051 test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
1052 epoch->list.prev == &mdev->current_epoch->list &&
1053 !test_bit(DE_IS_FINISHING, &epoch->flags)) {
1054 /* Nearly all conditions are met to finish that epoch... */
1055 if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
1056 mdev->write_ordering == WO_none ||
1057 (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
1058 ev & EV_CLEANUP) {
1059 finish = 1;
1060 set_bit(DE_IS_FINISHING, &epoch->flags);
1061 } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
1062 mdev->write_ordering == WO_bio_barrier) {
1063 atomic_inc(&epoch->active);
1064 schedule_flush = 1;
1065 }
1066 }
1067 if (finish) {
1068 if (!(ev & EV_CLEANUP)) {
1069 spin_unlock(&mdev->epoch_lock);
1070 drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
1071 spin_lock(&mdev->epoch_lock);
1072 }
1073 dec_unacked(mdev);
1074
1075 if (mdev->current_epoch != epoch) {
1076 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1077 list_del(&epoch->list);
1078 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1079 mdev->epochs--;
1080 kfree(epoch);
1081
1082 if (rv == FE_STILL_LIVE)
1083 rv = FE_DESTROYED;
1084 } else {
1085 epoch->flags = 0;
1086 atomic_set(&epoch->epoch_size, 0);
1087 /* atomic_set(&epoch->active, 0); is already zero */
1088 if (rv == FE_STILL_LIVE)
1089 rv = FE_RECYCLED;
1090 }
1091 }
1092
1093 if (!next_epoch)
1094 break;
1095
1096 epoch = next_epoch;
1097 } while (1);
1098
1099 spin_unlock(&mdev->epoch_lock);
1100
1101 if (schedule_flush) {
1102 struct flush_work *fw;
1103 fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
1104 if (fw) {
1105 fw->w.cb = w_flush;
1106 fw->epoch = epoch;
1107 drbd_queue_work(&mdev->data.work, &fw->w);
1108 } else {
1109 dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
1110 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1111 			/* That is not recursion, we only go one level deep */
1112 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
1113 drbd_may_finish_epoch(mdev, epoch, EV_PUT);
1114 }
1115 }
1116
1117 return rv;
1118 }
1119
1120 /**
1121  * drbd_bump_write_ordering() - Fall back to another write ordering method
1122 * @mdev: DRBD device.
1123 * @wo: Write ordering method to try.
1124 */
1125 void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
1126 {
1127 enum write_ordering_e pwo;
1128 static char *write_ordering_str[] = {
1129 [WO_none] = "none",
1130 [WO_drain_io] = "drain",
1131 [WO_bdev_flush] = "flush",
1132 [WO_bio_barrier] = "barrier",
1133 };
1134
1135 pwo = mdev->write_ordering;
1136 wo = min(pwo, wo);
1137 if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
1138 wo = WO_bdev_flush;
1139 if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
1140 wo = WO_drain_io;
1141 if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
1142 wo = WO_none;
1143 mdev->write_ordering = wo;
1144 if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
1145 dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
1146 }
1147
1148 /**
1149  * drbd_submit_ee() - Submit an epoch entry as one or more bios
1150 * @mdev: DRBD device.
1151 * @e: epoch entry
1152 * @rw: flag field, see bio->bi_rw
1153 */
1154 /* TODO allocate from our own bio_set. */
1155 int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
1156 const unsigned rw, const int fault_type)
1157 {
1158 struct bio *bios = NULL;
1159 struct bio *bio;
1160 struct page *page = e->pages;
1161 sector_t sector = e->sector;
1162 unsigned ds = e->size;
1163 unsigned n_bios = 0;
1164 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
1165
1166 /* In most cases, we will only need one bio. But in case the lower
1167 * level restrictions happen to be different at this offset on this
1168 * side than those of the sending peer, we may need to submit the
1169 * request in more than one bio. */
1170 next_bio:
1171 bio = bio_alloc(GFP_NOIO, nr_pages);
1172 if (!bio) {
1173 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1174 goto fail;
1175 }
1176 /* > e->sector, unless this is the first bio */
1177 bio->bi_sector = sector;
1178 bio->bi_bdev = mdev->ldev->backing_bdev;
1179 /* we special case some flags in the multi-bio case, see below
1180 * (REQ_UNPLUG, REQ_HARDBARRIER) */
1181 bio->bi_rw = rw;
1182 bio->bi_private = e;
1183 bio->bi_end_io = drbd_endio_sec;
1184
1185 bio->bi_next = bios;
1186 bios = bio;
1187 ++n_bios;
1188
1189 page_chain_for_each(page) {
1190 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1191 if (!bio_add_page(bio, page, len, 0)) {
1192 /* a single page must always be possible! */
1193 BUG_ON(bio->bi_vcnt == 0);
1194 goto next_bio;
1195 }
1196 ds -= len;
1197 sector += len >> 9;
1198 --nr_pages;
1199 }
1200 D_ASSERT(page == NULL);
1201 D_ASSERT(ds == 0);
1202
1203 atomic_set(&e->pending_bios, n_bios);
1204 do {
1205 bio = bios;
1206 bios = bios->bi_next;
1207 bio->bi_next = NULL;
1208
1209 /* strip off REQ_UNPLUG unless it is the last bio */
1210 if (bios)
1211 bio->bi_rw &= ~REQ_UNPLUG;
1212
1213 drbd_generic_make_request(mdev, fault_type, bio);
1214
1215 /* strip off REQ_HARDBARRIER,
1216 * unless it is the first or last bio */
1217 if (bios && bios->bi_next)
1218 bios->bi_rw &= ~REQ_HARDBARRIER;
1219 } while (bios);
1220 maybe_kick_lo(mdev);
1221 return 0;
1222
1223 fail:
1224 while (bios) {
1225 bio = bios;
1226 bios = bios->bi_next;
1227 bio_put(bio);
1228 }
1229 return -ENOMEM;
1230 }
1231
1232 /**
1233 * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
1234 * @mdev: DRBD device.
1235 * @w: work object.
1236 * @cancel: The connection will be closed anyways (unused in this callback)
1237 */
1238 int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
1239 {
1240 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1241 /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
1242 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
1243 so that we can finish that epoch in drbd_may_finish_epoch().
1244 That is necessary if we already have a long chain of Epochs, before
1245 we realize that REQ_HARDBARRIER is actually not supported */
1246
1247 /* As long as the -ENOTSUPP on the barrier is reported immediately
1248 that will never trigger. If it is reported late, we will just
1249 print that warning and continue correctly for all future requests
1250 with WO_bdev_flush */
1251 if (previous_epoch(mdev, e->epoch))
1252 dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
1253
1254 /* we still have a local reference,
1255 * get_ldev was done in receive_Data. */
1256
1257 e->w.cb = e_end_block;
1258 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
1259 /* drbd_submit_ee fails for one reason only:
1260 		 * if it was not able to allocate sufficient bios.
1261 * requeue, try again later. */
1262 e->w.cb = w_e_reissue;
1263 drbd_queue_work(&mdev->data.work, &e->w);
1264 }
1265 return 1;
1266 }
1267
1268 static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
1269 {
1270 int rv, issue_flush;
1271 struct p_barrier *p = (struct p_barrier *)h;
1272 struct drbd_epoch *epoch;
1273
1274 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
1275
1276 rv = drbd_recv(mdev, h->payload, h->length);
1277 ERR_IF(rv != h->length) return FALSE;
1278
1279 inc_unacked(mdev);
1280
1281 if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
1282 drbd_kick_lo(mdev);
1283
1284 mdev->current_epoch->barrier_nr = p->barrier;
1285 rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
1286
1287 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1288 * the activity log, which means it would not be resynced in case the
1289 * R_PRIMARY crashes now.
1290 * Therefore we must send the barrier_ack after the barrier request was
1291 * completed. */
1292 switch (mdev->write_ordering) {
1293 case WO_bio_barrier:
1294 case WO_none:
1295 if (rv == FE_RECYCLED)
1296 return TRUE;
1297 break;
1298
1299 case WO_bdev_flush:
1300 case WO_drain_io:
1301 if (rv == FE_STILL_LIVE) {
1302 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1303 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1304 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1305 }
1306 if (rv == FE_RECYCLED)
1307 return TRUE;
1308
1309 /* The asender will send all the ACKs and barrier ACKs out, since
1310 all EEs moved from the active_ee to the done_ee. We need to
1311 provide a new epoch object for the EEs that come in soon */
1312 break;
1313 }
1314
1315 /* receiver context, in the writeout path of the other node.
1316 * avoid potential distributed deadlock */
1317 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1318 if (!epoch) {
1319 dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
1320 issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
1321 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1322 if (issue_flush) {
1323 rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
1324 if (rv == FE_RECYCLED)
1325 return TRUE;
1326 }
1327
1328 drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
1329
1330 return TRUE;
1331 }
1332
1333 epoch->flags = 0;
1334 atomic_set(&epoch->epoch_size, 0);
1335 atomic_set(&epoch->active, 0);
1336
1337 spin_lock(&mdev->epoch_lock);
1338 if (atomic_read(&mdev->current_epoch->epoch_size)) {
1339 list_add(&epoch->list, &mdev->current_epoch->list);
1340 mdev->current_epoch = epoch;
1341 mdev->epochs++;
1342 } else {
1343 /* The current_epoch got recycled while we allocated this one... */
1344 kfree(epoch);
1345 }
1346 spin_unlock(&mdev->epoch_lock);
1347
1348 return TRUE;
1349 }
1350
1351 /* used from receive_RSDataReply (recv_resync_read)
1352 * and from receive_Data */
1353 static struct drbd_epoch_entry *
1354 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1355 {
1356 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1357 struct drbd_epoch_entry *e;
1358 struct page *page;
1359 int dgs, ds, rr;
1360 void *dig_in = mdev->int_dig_in;
1361 void *dig_vv = mdev->int_dig_vv;
1362 unsigned long *data;
1363
1364 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1365 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1366
1367 if (dgs) {
1368 rr = drbd_recv(mdev, dig_in, dgs);
1369 if (rr != dgs) {
1370 dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1371 rr, dgs);
1372 return NULL;
1373 }
1374 }
1375
1376 data_size -= dgs;
1377
1378 ERR_IF(data_size & 0x1ff) return NULL;
1379 ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL;
1380
1381 	/* even though we trust our peer,
1382 * we sometimes have to double check. */
1383 if (sector + (data_size>>9) > capacity) {
1384 dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
1385 (unsigned long long)capacity,
1386 (unsigned long long)sector, data_size);
1387 return NULL;
1388 }
1389
1390 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1391 * "criss-cross" setup, that might cause write-out on some other DRBD,
1392 * which in turn might block on the other node at this very place. */
1393 e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1394 if (!e)
1395 return NULL;
1396
1397 ds = data_size;
1398 page = e->pages;
1399 page_chain_for_each(page) {
1400 unsigned len = min_t(int, ds, PAGE_SIZE);
1401 data = kmap(page);
1402 rr = drbd_recv(mdev, data, len);
1403 if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) {
1404 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1405 data[0] = data[0] ^ (unsigned long)-1;
1406 }
1407 kunmap(page);
1408 if (rr != len) {
1409 drbd_free_ee(mdev, e);
1410 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1411 rr, len);
1412 return NULL;
1413 }
1414 ds -= rr;
1415 }
1416
1417 if (dgs) {
1418 drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
1419 if (memcmp(dig_in, dig_vv, dgs)) {
1420 dev_err(DEV, "Digest integrity check FAILED.\n");
1421 drbd_bcast_ee(mdev, "digest failed",
1422 dgs, dig_in, dig_vv, e);
1423 drbd_free_ee(mdev, e);
1424 return NULL;
1425 }
1426 }
1427 mdev->recv_cnt += data_size>>9;
1428 return e;
1429 }
1430
1431 /* drbd_drain_block() just takes a data block
1432 * out of the socket input buffer, and discards it.
1433 */
1434 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1435 {
1436 struct page *page;
1437 int rr, rv = 1;
1438 void *data;
1439
1440 if (!data_size)
1441 return TRUE;
1442
1443 page = drbd_pp_alloc(mdev, 1, 1);
1444
1445 data = kmap(page);
1446 while (data_size) {
1447 rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
1448 if (rr != min_t(int, data_size, PAGE_SIZE)) {
1449 rv = 0;
1450 dev_warn(DEV, "short read receiving data: read %d expected %d\n",
1451 rr, min_t(int, data_size, PAGE_SIZE));
1452 break;
1453 }
1454 data_size -= rr;
1455 }
1456 kunmap(page);
1457 drbd_pp_free(mdev, page);
1458 return rv;
1459 }
1460
1461 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1462 sector_t sector, int data_size)
1463 {
1464 struct bio_vec *bvec;
1465 struct bio *bio;
1466 int dgs, rr, i, expect;
1467 void *dig_in = mdev->int_dig_in;
1468 void *dig_vv = mdev->int_dig_vv;
1469
1470 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1471 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1472
1473 if (dgs) {
1474 rr = drbd_recv(mdev, dig_in, dgs);
1475 if (rr != dgs) {
1476 dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n",
1477 rr, dgs);
1478 return 0;
1479 }
1480 }
1481
1482 data_size -= dgs;
1483
1484 /* optimistically update recv_cnt. if receiving fails below,
1485 * we disconnect anyways, and counters will be reset. */
1486 mdev->recv_cnt += data_size>>9;
1487
1488 bio = req->master_bio;
1489 D_ASSERT(sector == bio->bi_sector);
1490
1491 bio_for_each_segment(bvec, bio, i) {
1492 expect = min_t(int, data_size, bvec->bv_len);
1493 rr = drbd_recv(mdev,
1494 kmap(bvec->bv_page)+bvec->bv_offset,
1495 expect);
1496 kunmap(bvec->bv_page);
1497 if (rr != expect) {
1498 dev_warn(DEV, "short read receiving data reply: "
1499 "read %d expected %d\n",
1500 rr, expect);
1501 return 0;
1502 }
1503 data_size -= rr;
1504 }
1505
1506 if (dgs) {
1507 drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
1508 if (memcmp(dig_in, dig_vv, dgs)) {
1509 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1510 return 0;
1511 }
1512 }
1513
1514 D_ASSERT(data_size == 0);
1515 return 1;
1516 }
1517
1518 /* e_end_resync_block() is called via
1519 * drbd_process_done_ee() by asender only */
1520 static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1521 {
1522 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1523 sector_t sector = e->sector;
1524 int ok;
1525
1526 D_ASSERT(hlist_unhashed(&e->colision));
1527
1528 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1529 drbd_set_in_sync(mdev, sector, e->size);
1530 ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
1531 } else {
1532 /* Record failure to sync */
1533 drbd_rs_failed_io(mdev, sector, e->size);
1534
1535 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1536 }
1537 dec_unacked(mdev);
1538
1539 return ok;
1540 }
1541
1542 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1543 {
1544 struct drbd_epoch_entry *e;
1545
1546 e = read_in_block(mdev, ID_SYNCER, sector, data_size);
1547 if (!e)
1548 goto fail;
1549
1550 dec_rs_pending(mdev);
1551
1552 inc_unacked(mdev);
1553 /* corresponding dec_unacked() in e_end_resync_block()
1554 * respective _drbd_clear_done_ee */
1555
1556 e->w.cb = e_end_resync_block;
1557
1558 spin_lock_irq(&mdev->req_lock);
1559 list_add(&e->w.list, &mdev->sync_ee);
1560 spin_unlock_irq(&mdev->req_lock);
1561
1562 if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)
1563 return TRUE;
1564
1565 drbd_free_ee(mdev, e);
1566 fail:
1567 put_ldev(mdev);
1568 return FALSE;
1569 }
1570
1571 static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h)
1572 {
1573 struct drbd_request *req;
1574 sector_t sector;
1575 unsigned int header_size, data_size;
1576 int ok;
1577 struct p_data *p = (struct p_data *)h;
1578
1579 header_size = sizeof(*p) - sizeof(*h);
1580 data_size = h->length - header_size;
1581
1582 ERR_IF(data_size == 0) return FALSE;
1583
1584 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1585 return FALSE;
1586
1587 sector = be64_to_cpu(p->sector);
1588
1589 spin_lock_irq(&mdev->req_lock);
1590 req = _ar_id_to_req(mdev, p->block_id, sector);
1591 spin_unlock_irq(&mdev->req_lock);
1592 if (unlikely(!req)) {
1593 dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");
1594 return FALSE;
1595 }
1596
1597 /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid
1598 * special casing it there for the various failure cases.
1599 * still no race with drbd_fail_pending_reads */
1600 ok = recv_dless_read(mdev, req, sector, data_size);
1601
1602 if (ok)
1603 req_mod(req, data_received);
1604 /* else: nothing. handled from drbd_disconnect...
1605 * I don't think we may complete this just yet
1606 * in case we are "on-disconnect: freeze" */
1607
1608 return ok;
1609 }
1610
1611 static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h)
1612 {
1613 sector_t sector;
1614 unsigned int header_size, data_size;
1615 int ok;
1616 struct p_data *p = (struct p_data *)h;
1617
1618 header_size = sizeof(*p) - sizeof(*h);
1619 data_size = h->length - header_size;
1620
1621 ERR_IF(data_size == 0) return FALSE;
1622
1623 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1624 return FALSE;
1625
1626 sector = be64_to_cpu(p->sector);
1627 D_ASSERT(p->block_id == ID_SYNCER);
1628
1629 if (get_ldev(mdev)) {
1630 /* data is submitted to disk within recv_resync_read.
1631 * corresponding put_ldev done below on error,
1632 * or in drbd_endio_write_sec. */
1633 ok = recv_resync_read(mdev, sector, data_size);
1634 } else {
1635 if (__ratelimit(&drbd_ratelimit_state))
1636 dev_err(DEV, "Can not write resync data to local disk.\n");
1637
1638 ok = drbd_drain_block(mdev, data_size);
1639
1640 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1641 }
1642
1643 return ok;
1644 }
1645
1646 /* e_end_block() is called via drbd_process_done_ee().
1647 * this means this function only runs in the asender thread
1648 */
1649 static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1650 {
1651 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1652 sector_t sector = e->sector;
1653 struct drbd_epoch *epoch;
1654 int ok = 1, pcmd;
1655
1656 if (e->flags & EE_IS_BARRIER) {
1657 epoch = previous_epoch(mdev, e->epoch);
1658 if (epoch)
1659 drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
1660 }
1661
1662 if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
1663 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1664 pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1665 mdev->state.conn <= C_PAUSED_SYNC_T &&
1666 e->flags & EE_MAY_SET_IN_SYNC) ?
1667 P_RS_WRITE_ACK : P_WRITE_ACK;
1668 ok &= drbd_send_ack(mdev, pcmd, e);
1669 if (pcmd == P_RS_WRITE_ACK)
1670 drbd_set_in_sync(mdev, sector, e->size);
1671 } else {
1672 ok = drbd_send_ack(mdev, P_NEG_ACK, e);
1673 /* we expect it to be marked out of sync anyways...
1674 * maybe assert this? */
1675 }
1676 dec_unacked(mdev);
1677 }
1678 /* we delete from the conflict detection hash _after_ we sent out the
1679 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1680 if (mdev->net_conf->two_primaries) {
1681 spin_lock_irq(&mdev->req_lock);
1682 D_ASSERT(!hlist_unhashed(&e->colision));
1683 hlist_del_init(&e->colision);
1684 spin_unlock_irq(&mdev->req_lock);
1685 } else {
1686 D_ASSERT(hlist_unhashed(&e->colision));
1687 }
1688
1689 drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1690
1691 return ok;
1692 }
1693
1694 static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1695 {
1696 struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1697 int ok = 1;
1698
1699 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1700 ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1701
1702 spin_lock_irq(&mdev->req_lock);
1703 D_ASSERT(!hlist_unhashed(&e->colision));
1704 hlist_del_init(&e->colision);
1705 spin_unlock_irq(&mdev->req_lock);
1706
1707 dec_unacked(mdev);
1708
1709 return ok;
1710 }
1711
1712 /* Called from receive_Data.
1713 * Synchronize packets on sock with packets on msock.
1714 *
1715 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1716 * packet traveling on msock, they are still processed in the order they have
1717 * been sent.
1718 *
1719 * Note: we don't care for Ack packets overtaking P_DATA packets.
1720 *
1721 * In case packet_seq is larger than mdev->peer_seq number, there are
1722 * outstanding packets on the msock. We wait for them to arrive.
1723 * In case we are the logically next packet, we update mdev->peer_seq
1724 * ourselves. Correctly handles 32bit wrap around.
1725 *
1726 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1727 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1728 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1729  * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1730 *
1731 * returns 0 if we may process the packet,
1732 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1733 static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1734 {
1735 DEFINE_WAIT(wait);
1736 unsigned int p_seq;
1737 long timeout;
1738 int ret = 0;
1739 spin_lock(&mdev->peer_seq_lock);
1740 for (;;) {
1741 prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1742 if (seq_le(packet_seq, mdev->peer_seq+1))
1743 break;
1744 if (signal_pending(current)) {
1745 ret = -ERESTARTSYS;
1746 break;
1747 }
1748 p_seq = mdev->peer_seq;
1749 spin_unlock(&mdev->peer_seq_lock);
1750 timeout = schedule_timeout(30*HZ);
1751 spin_lock(&mdev->peer_seq_lock);
1752 if (timeout == 0 && p_seq == mdev->peer_seq) {
1753 ret = -ETIMEDOUT;
1754 dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1755 break;
1756 }
1757 }
1758 finish_wait(&mdev->seq_wait, &wait);
1759 if (mdev->peer_seq+1 == packet_seq)
1760 mdev->peer_seq++;
1761 spin_unlock(&mdev->peer_seq_lock);
1762 return ret;
1763 }
1764
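/* seq_le() is assumed to compare sequence numbers modulo 2^32, so the
 * 32bit wrap around discussed above works out; a minimal sketch:
 *
 *	static inline int seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 */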
1765 /* mirrored write */
1766 static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
1767 {
1768 sector_t sector;
1769 struct drbd_epoch_entry *e;
1770 struct p_data *p = (struct p_data *)h;
1771 int header_size, data_size;
1772 int rw = WRITE;
1773 u32 dp_flags;
1774
1775 header_size = sizeof(*p) - sizeof(*h);
1776 data_size = h->length - header_size;
1777
1778 ERR_IF(data_size == 0) return FALSE;
1779
1780 if (drbd_recv(mdev, h->payload, header_size) != header_size)
1781 return FALSE;
1782
1783 if (!get_ldev(mdev)) {
1784 if (__ratelimit(&drbd_ratelimit_state))
1785 dev_err(DEV, "Can not write mirrored data block "
1786 "to local disk.\n");
1787 spin_lock(&mdev->peer_seq_lock);
1788 if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
1789 mdev->peer_seq++;
1790 spin_unlock(&mdev->peer_seq_lock);
1791
1792 drbd_send_ack_dp(mdev, P_NEG_ACK, p);
1793 atomic_inc(&mdev->current_epoch->epoch_size);
1794 return drbd_drain_block(mdev, data_size);
1795 }
1796
1797 /* get_ldev(mdev) successful.
1798 * Corresponding put_ldev done either below (on various errors),
1799 * or in drbd_endio_write_sec, if we successfully submit the data at
1800 * the end of this function. */
1801
1802 sector = be64_to_cpu(p->sector);
1803 e = read_in_block(mdev, p->block_id, sector, data_size);
1804 if (!e) {
1805 put_ldev(mdev);
1806 return FALSE;
1807 }
1808
1809 e->w.cb = e_end_block;
1810
1811 spin_lock(&mdev->epoch_lock);
1812 e->epoch = mdev->current_epoch;
1813 atomic_inc(&e->epoch->epoch_size);
1814 atomic_inc(&e->epoch->active);
1815
1816 if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
1817 struct drbd_epoch *epoch;
1818 /* Issue a barrier if we start a new epoch, and the previous epoch
1819 		   was not an epoch containing a single request which already was
1820 a Barrier. */
1821 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
1822 if (epoch == e->epoch) {
1823 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1824 rw |= REQ_HARDBARRIER;
1825 e->flags |= EE_IS_BARRIER;
1826 } else {
1827 if (atomic_read(&epoch->epoch_size) > 1 ||
1828 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
1829 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
1830 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
1831 rw |= REQ_HARDBARRIER;
1832 e->flags |= EE_IS_BARRIER;
1833 }
1834 }
1835 }
1836 spin_unlock(&mdev->epoch_lock);
1837
1838 dp_flags = be32_to_cpu(p->dp_flags);
1839 if (dp_flags & DP_HARDBARRIER) {
1840 dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
1841 /* rw |= REQ_HARDBARRIER; */
1842 }
1843 if (dp_flags & DP_RW_SYNC)
1844 rw |= REQ_SYNC | REQ_UNPLUG;
1845 if (dp_flags & DP_MAY_SET_IN_SYNC)
1846 e->flags |= EE_MAY_SET_IN_SYNC;
1847
1848 /* I'm the receiver, I do hold a net_cnt reference. */
1849 if (!mdev->net_conf->two_primaries) {
1850 spin_lock_irq(&mdev->req_lock);
1851 } else {
1852 /* don't get the req_lock yet,
1853 * we may sleep in drbd_wait_peer_seq */
1854 const int size = e->size;
1855 const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1856 DEFINE_WAIT(wait);
1857 struct drbd_request *i;
1858 struct hlist_node *n;
1859 struct hlist_head *slot;
1860 int first;
1861
1862 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1863 BUG_ON(mdev->ee_hash == NULL);
1864 BUG_ON(mdev->tl_hash == NULL);
1865
1866 /* conflict detection and handling:
1867 * 1. wait on the sequence number,
1868 * in case this data packet overtook ACK packets.
1869 * 2. check our hash tables for conflicting requests.
1870 * we only need to walk the tl_hash, since an ee can not
1871 		 * have a conflict with another ee: on the submitting
1872 * node, the corresponding req had already been conflicting,
1873 * and a conflicting req is never sent.
1874 *
1875 * Note: for two_primaries, we are protocol C,
1876 * so there cannot be any request that is DONE
1877 * but still on the transfer log.
1878 *
1879 * unconditionally add to the ee_hash.
1880 *
1881 * if no conflicting request is found:
1882 * submit.
1883 *
1884 * if any conflicting request is found
1885 * that has not yet been acked,
1886 * AND I have the "discard concurrent writes" flag:
1887 * queue (via done_ee) the P_DISCARD_ACK; OUT.
1888 *
1889 * if any conflicting request is found:
1890 * block the receiver, waiting on misc_wait
1891 * until no more conflicting requests are there,
1892 * or we get interrupted (disconnect).
1893 *
1894 * we do not just write after local io completion of those
1895 * requests, but only after req is done completely, i.e.
1896 * we wait for the P_DISCARD_ACK to arrive!
1897 *
1898 * then proceed normally, i.e. submit.
1899 */
1900 if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
1901 goto out_interrupted;
1902
1903 spin_lock_irq(&mdev->req_lock);
1904
1905 hlist_add_head(&e->colision, ee_hash_slot(mdev, sector));
1906
1907 #define OVERLAPS overlaps(i->sector, i->size, sector, size)
1908 slot = tl_hash_slot(mdev, sector);
1909 first = 1;
1910 for (;;) {
1911 int have_unacked = 0;
1912 int have_conflict = 0;
1913 prepare_to_wait(&mdev->misc_wait, &wait,
1914 TASK_INTERRUPTIBLE);
1915 hlist_for_each_entry(i, n, slot, colision) {
1916 if (OVERLAPS) {
1917 /* only ALERT on first iteration,
1918 * we may be woken up early... */
1919 if (first)
1920 dev_alert(DEV, "%s[%u] Concurrent local write detected!"
1921 " new: %llus +%u; pending: %llus +%u\n",
1922 current->comm, current->pid,
1923 (unsigned long long)sector, size,
1924 (unsigned long long)i->sector, i->size);
1925 if (i->rq_state & RQ_NET_PENDING)
1926 ++have_unacked;
1927 ++have_conflict;
1928 }
1929 }
1930 #undef OVERLAPS
1931 if (!have_conflict)
1932 break;
1933
1934 /* Discard Ack only for the _first_ iteration */
1935 if (first && discard && have_unacked) {
1936 dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
1937 (unsigned long long)sector);
1938 inc_unacked(mdev);
1939 e->w.cb = e_send_discard_ack;
1940 list_add_tail(&e->w.list, &mdev->done_ee);
1941
1942 spin_unlock_irq(&mdev->req_lock);
1943
1944 /* we could probably send that P_DISCARD_ACK ourselves,
1945 * but I don't like the receiver using the msock */
1946
1947 put_ldev(mdev);
1948 wake_asender(mdev);
1949 finish_wait(&mdev->misc_wait, &wait);
1950 return TRUE;
1951 }
1952
1953 if (signal_pending(current)) {
1954 hlist_del_init(&e->colision);
1955
1956 spin_unlock_irq(&mdev->req_lock);
1957
1958 finish_wait(&mdev->misc_wait, &wait);
1959 goto out_interrupted;
1960 }
1961
1962 spin_unlock_irq(&mdev->req_lock);
1963 if (first) {
1964 first = 0;
1965 dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
1966 "sec=%llus\n", (unsigned long long)sector);
1967 } else if (discard) {
1968 /* we had none on the first iteration.
1969 * there must be none now. */
1970 D_ASSERT(have_unacked == 0);
1971 }
1972 schedule();
1973 spin_lock_irq(&mdev->req_lock);
1974 }
1975 finish_wait(&mdev->misc_wait, &wait);
1976 }
1977
1978 list_add(&e->w.list, &mdev->active_ee);
1979 spin_unlock_irq(&mdev->req_lock);
1980
1981 switch (mdev->net_conf->wire_protocol) {
1982 case DRBD_PROT_C:
1983 inc_unacked(mdev);
1984 /* corresponding dec_unacked() in e_end_block()
1985 * respective _drbd_clear_done_ee */
1986 break;
1987 case DRBD_PROT_B:
1988 /* I really don't like it that the receiver thread
1989 * sends on the msock, but anyways */
1990 drbd_send_ack(mdev, P_RECV_ACK, e);
1991 break;
1992 case DRBD_PROT_A:
1993 /* nothing to do */
1994 break;
1995 }
1996
1997 if (mdev->state.pdsk == D_DISKLESS) {
/* In case we have the only disk of the cluster: mark this block
out of sync and cover it with an activity log extent. */
1999 drbd_set_out_of_sync(mdev, e->sector, e->size);
2000 e->flags |= EE_CALL_AL_COMPLETE_IO;
2001 drbd_al_begin_io(mdev, e->sector);
2002 }
2003
2004 if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)
2005 return TRUE;
2006
2007 out_interrupted:
2008 /* yes, the epoch_size now is imbalanced.
2009 * but we drop the connection anyways, so we don't have a chance to
2010 * receive a barrier... atomic_inc(&mdev->epoch_size); */
2011 put_ldev(mdev);
2012 drbd_free_ee(mdev, e);
2013 return FALSE;
2014 }
2015
2016 static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h)
2017 {
2018 sector_t sector;
2019 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
2020 struct drbd_epoch_entry *e;
2021 struct digest_info *di = NULL;
2022 int size, digest_size;
2023 unsigned int fault_type;
2024 struct p_block_req *p =
2025 (struct p_block_req *)h;
2026 const int brps = sizeof(*p)-sizeof(*h);
2027
2028 if (drbd_recv(mdev, h->payload, brps) != brps)
2029 return FALSE;
2030
2031 sector = be64_to_cpu(p->sector);
2032 size = be32_to_cpu(p->blksize);
2033
2034 if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) {
2035 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2036 (unsigned long long)sector, size);
2037 return FALSE;
2038 }
2039 if (sector + (size>>9) > capacity) {
2040 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2041 (unsigned long long)sector, size);
2042 return FALSE;
2043 }
2044
2045 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2046 if (__ratelimit(&drbd_ratelimit_state))
2047 dev_err(DEV, "Can not satisfy peer's read request, "
2048 "no local data.\n");
2049 drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY :
P_NEG_RS_DREPLY, p);
2051 return drbd_drain_block(mdev, h->length - brps);
2052 }
2053
2054 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2055 * "criss-cross" setup, that might cause write-out on some other DRBD,
2056 * which in turn might block on the other node at this very place. */
2057 e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2058 if (!e) {
2059 put_ldev(mdev);
2060 return FALSE;
2061 }
2062
2063 switch (h->command) {
2064 case P_DATA_REQUEST:
2065 e->w.cb = w_e_end_data_req;
2066 fault_type = DRBD_FAULT_DT_RD;
2067 break;
2068 case P_RS_DATA_REQUEST:
2069 e->w.cb = w_e_end_rsdata_req;
2070 fault_type = DRBD_FAULT_RS_RD;
/* Eventually this should become asynchronous. Currently it
2072 * blocks the whole receiver just to delay the reading of a
2073 * resync data block.
2074 * the drbd_work_queue mechanism is made for this...
2075 */
2076 if (!drbd_rs_begin_io(mdev, sector)) {
2077 /* we have been interrupted,
2078 * probably connection lost! */
2079 D_ASSERT(signal_pending(current));
2080 goto out_free_e;
2081 }
2082 break;
2083
2084 case P_OV_REPLY:
2085 case P_CSUM_RS_REQUEST:
2086 fault_type = DRBD_FAULT_RS_RD;
digest_size = h->length - brps;
2088 di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2089 if (!di)
2090 goto out_free_e;
2091
2092 di->digest_size = digest_size;
2093 di->digest = (((char *)di)+sizeof(struct digest_info));
2094
2095 if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2096 goto out_free_e;
2097
2098 e->block_id = (u64)(unsigned long)di;
2099 if (h->command == P_CSUM_RS_REQUEST) {
2100 D_ASSERT(mdev->agreed_pro_version >= 89);
2101 e->w.cb = w_e_end_csum_rs_req;
2102 } else if (h->command == P_OV_REPLY) {
2103 e->w.cb = w_e_end_ov_reply;
2104 dec_rs_pending(mdev);
2105 break;
2106 }
2107
2108 if (!drbd_rs_begin_io(mdev, sector)) {
2109 /* we have been interrupted, probably connection lost! */
2110 D_ASSERT(signal_pending(current));
2111 goto out_free_e;
2112 }
2113 break;
2114
2115 case P_OV_REQUEST:
2116 if (mdev->state.conn >= C_CONNECTED &&
2117 mdev->state.conn != C_VERIFY_T)
2118 dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n",
2119 drbd_conn_str(mdev->state.conn));
2120 if (mdev->ov_start_sector == ~(sector_t)0 &&
2121 mdev->agreed_pro_version >= 90) {
2122 mdev->ov_start_sector = sector;
2123 mdev->ov_position = sector;
2124 mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector);
2125 dev_info(DEV, "Online Verify start sector: %llu\n",
2126 (unsigned long long)sector);
2127 }
2128 e->w.cb = w_e_end_ov_req;
2129 fault_type = DRBD_FAULT_RS_RD;
2130 /* Eventually this should become asynchronous. Currently it
2131 * blocks the whole receiver just to delay the reading of a
2132 * resync data block.
2133 * the drbd_work_queue mechanism is made for this...
2134 */
2135 if (!drbd_rs_begin_io(mdev, sector)) {
2136 /* we have been interrupted,
2137 * probably connection lost! */
2138 D_ASSERT(signal_pending(current));
2139 goto out_free_e;
2140 }
2141 break;
2142
2143
2144 default:
2145 dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
2146 cmdname(h->command));
2147 fault_type = DRBD_FAULT_MAX;
2148 }
2149
2150 spin_lock_irq(&mdev->req_lock);
2151 list_add(&e->w.list, &mdev->read_ee);
2152 spin_unlock_irq(&mdev->req_lock);
2153
2154 inc_unacked(mdev);
2155
2156 if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)
2157 return TRUE;
2158
2159 out_free_e:
2160 kfree(di);
2161 put_ldev(mdev);
2162 drbd_free_ee(mdev, e);
2163 return FALSE;
2164 }
2165
2166 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2167 {
2168 int self, peer, rv = -100;
2169 unsigned long ch_self, ch_peer;
2170
2171 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2172 peer = mdev->p_uuid[UI_BITMAP] & 1;
2173
2174 ch_peer = mdev->p_uuid[UI_SIZE];
2175 ch_self = mdev->comm_bm_set;
2176
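/* Return convention of the after-split-brain handlers: 1 keeps our
* data and discards the peer's (the peer becomes sync target), -1
* discards ours, -100 means no automatic decision was reached. */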
2177 switch (mdev->net_conf->after_sb_0p) {
2178 case ASB_CONSENSUS:
2179 case ASB_DISCARD_SECONDARY:
2180 case ASB_CALL_HELPER:
2181 dev_err(DEV, "Configuration error.\n");
2182 break;
2183 case ASB_DISCONNECT:
2184 break;
2185 case ASB_DISCARD_YOUNGER_PRI:
2186 if (self == 0 && peer == 1) {
2187 rv = -1;
2188 break;
2189 }
2190 if (self == 1 && peer == 0) {
2191 rv = 1;
2192 break;
2193 }
2194 /* Else fall through to one of the other strategies... */
2195 case ASB_DISCARD_OLDER_PRI:
2196 if (self == 0 && peer == 1) {
2197 rv = 1;
2198 break;
2199 }
2200 if (self == 1 && peer == 0) {
2201 rv = -1;
2202 break;
2203 }
2204 /* Else fall through to one of the other strategies... */
2205 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2206 "Using discard-least-changes instead\n");
2207 case ASB_DISCARD_ZERO_CHG:
2208 if (ch_peer == 0 && ch_self == 0) {
2209 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2210 ? -1 : 1;
2211 break;
2212 } else {
2213 if (ch_peer == 0) { rv = 1; break; }
2214 if (ch_self == 0) { rv = -1; break; }
2215 }
2216 if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
2217 break;
2218 case ASB_DISCARD_LEAST_CHG:
2219 if (ch_self < ch_peer)
2220 rv = -1;
2221 else if (ch_self > ch_peer)
2222 rv = 1;
2223 else /* ( ch_self == ch_peer ) */
2224 /* Well, then use something else. */
2225 rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
2226 ? -1 : 1;
2227 break;
2228 case ASB_DISCARD_LOCAL:
2229 rv = -1;
2230 break;
2231 case ASB_DISCARD_REMOTE:
2232 rv = 1;
2233 }
2234
2235 return rv;
2236 }
2237
2238 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2239 {
2240 int self, peer, hg, rv = -100;
2241
2242 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2243 peer = mdev->p_uuid[UI_BITMAP] & 1;
2244
2245 switch (mdev->net_conf->after_sb_1p) {
2246 case ASB_DISCARD_YOUNGER_PRI:
2247 case ASB_DISCARD_OLDER_PRI:
2248 case ASB_DISCARD_LEAST_CHG:
2249 case ASB_DISCARD_LOCAL:
2250 case ASB_DISCARD_REMOTE:
2251 dev_err(DEV, "Configuration error.\n");
2252 break;
2253 case ASB_DISCONNECT:
2254 break;
2255 case ASB_CONSENSUS:
2256 hg = drbd_asb_recover_0p(mdev);
2257 if (hg == -1 && mdev->state.role == R_SECONDARY)
2258 rv = hg;
2259 if (hg == 1 && mdev->state.role == R_PRIMARY)
2260 rv = hg;
2261 break;
2262 case ASB_VIOLENTLY:
2263 rv = drbd_asb_recover_0p(mdev);
2264 break;
2265 case ASB_DISCARD_SECONDARY:
2266 return mdev->state.role == R_PRIMARY ? 1 : -1;
2267 case ASB_CALL_HELPER:
2268 hg = drbd_asb_recover_0p(mdev);
2269 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2271 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2272 * we might be here in C_WF_REPORT_PARAMS which is transient.
2273 * we do not need to wait for the after state change work either. */
2274 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2275 if (self != SS_SUCCESS) {
2276 drbd_khelper(mdev, "pri-lost-after-sb");
2277 } else {
2278 dev_warn(DEV, "Successfully gave up primary role.\n");
2279 rv = hg;
2280 }
2281 } else
2282 rv = hg;
2283 }
2284
2285 return rv;
2286 }
2287
2288 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2289 {
2290 int self, peer, hg, rv = -100;
2291
2292 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2293 peer = mdev->p_uuid[UI_BITMAP] & 1;
2294
2295 switch (mdev->net_conf->after_sb_2p) {
2296 case ASB_DISCARD_YOUNGER_PRI:
2297 case ASB_DISCARD_OLDER_PRI:
2298 case ASB_DISCARD_LEAST_CHG:
2299 case ASB_DISCARD_LOCAL:
2300 case ASB_DISCARD_REMOTE:
2301 case ASB_CONSENSUS:
2302 case ASB_DISCARD_SECONDARY:
2303 dev_err(DEV, "Configuration error.\n");
2304 break;
2305 case ASB_VIOLENTLY:
2306 rv = drbd_asb_recover_0p(mdev);
2307 break;
2308 case ASB_DISCONNECT:
2309 break;
2310 case ASB_CALL_HELPER:
2311 hg = drbd_asb_recover_0p(mdev);
2312 if (hg == -1) {
2313 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2314 * we might be here in C_WF_REPORT_PARAMS which is transient.
2315 * we do not need to wait for the after state change work either. */
2316 self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2317 if (self != SS_SUCCESS) {
2318 drbd_khelper(mdev, "pri-lost-after-sb");
2319 } else {
2320 dev_warn(DEV, "Successfully gave up primary role.\n");
2321 rv = hg;
2322 }
2323 } else
2324 rv = hg;
2325 }
2326
2327 return rv;
2328 }
2329
2330 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2331 u64 bits, u64 flags)
2332 {
2333 if (!uuid) {
2334 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2335 return;
2336 }
2337 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2338 text,
2339 (unsigned long long)uuid[UI_CURRENT],
2340 (unsigned long long)uuid[UI_BITMAP],
2341 (unsigned long long)uuid[UI_HISTORY_START],
2342 (unsigned long long)uuid[UI_HISTORY_END],
2343 (unsigned long long)bits,
2344 (unsigned long long)flags);
2345 }
2346
/*
  100  after split brain try auto recover
    2  C_SYNC_SOURCE set BitMap
    1  C_SYNC_SOURCE use BitMap
    0  no Sync
   -1  C_SYNC_TARGET use BitMap
   -2  C_SYNC_TARGET set BitMap
 -100  after split brain, disconnect
-1000  unrelated data
-1001  requires agreed protocol version 91 or newer to resolve
*/
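/* Note: bit 0 of each UUID is used as a flag (set while the node is
* Primary), which is why the comparisons below mask it off with
* ~((u64)1) before comparing data generations. */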
2357 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2358 {
2359 u64 self, peer;
2360 int i, j;
2361
2362 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2363 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2364
2365 *rule_nr = 10;
2366 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2367 return 0;
2368
2369 *rule_nr = 20;
2370 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2371 peer != UUID_JUST_CREATED)
2372 return -2;
2373
2374 *rule_nr = 30;
2375 if (self != UUID_JUST_CREATED &&
2376 (peer == UUID_JUST_CREATED || peer == (u64)0))
2377 return 2;
2378
2379 if (self == peer) {
2380 int rct, dc; /* roles at crash time */
2381
2382 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2383
2384 if (mdev->agreed_pro_version < 91)
2385 return -1001;
2386
2387 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2388 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2389 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2390 drbd_uuid_set_bm(mdev, 0UL);
2391
2392 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2393 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2394 *rule_nr = 34;
2395 } else {
2396 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2397 *rule_nr = 36;
2398 }
2399
2400 return 1;
2401 }
2402
2403 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2404
2405 if (mdev->agreed_pro_version < 91)
2406 return -1001;
2407
2408 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2409 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2410 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2411
2412 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2413 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2414 mdev->p_uuid[UI_BITMAP] = 0UL;
2415
2416 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2417 *rule_nr = 35;
2418 } else {
2419 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2420 *rule_nr = 37;
2421 }
2422
2423 return -1;
2424 }
2425
2426 /* Common power [off|failure] */
2427 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2428 (mdev->p_uuid[UI_FLAGS] & 2);
2429 /* lowest bit is set when we were primary,
2430 * next bit (weight 2) is set when peer was primary */
2431 *rule_nr = 40;
2432
2433 switch (rct) {
2434 case 0: /* !self_pri && !peer_pri */ return 0;
2435 case 1: /* self_pri && !peer_pri */ return 1;
2436 case 2: /* !self_pri && peer_pri */ return -1;
2437 case 3: /* self_pri && peer_pri */
2438 dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2439 return dc ? -1 : 1;
2440 }
2441 }
2442
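/* rule 50: the peer's bitmap UUID matches our current UUID, i.e. the
* peer has been tracking changes against the data generation we still
* hold; become sync target and reuse its bitmap. Rule 70 below is the
* mirrored case with the roles exchanged. */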
2443 *rule_nr = 50;
2444 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2445 if (self == peer)
2446 return -1;
2447
2448 *rule_nr = 51;
2449 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2450 if (self == peer) {
2451 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2452 peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1);
2453 if (self == peer) {
/* The last P_SYNC_UUID did not get through. Undo the modifications
the peer made to its UUIDs when it last started a resync as sync source. */
2456
2457 if (mdev->agreed_pro_version < 91)
2458 return -1001;
2459
2460 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2461 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2462 return -1;
2463 }
2464 }
2465
2466 *rule_nr = 60;
2467 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2468 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2469 peer = mdev->p_uuid[i] & ~((u64)1);
2470 if (self == peer)
2471 return -2;
2472 }
2473
2474 *rule_nr = 70;
2475 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2476 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2477 if (self == peer)
2478 return 1;
2479
2480 *rule_nr = 71;
2481 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2482 if (self == peer) {
2483 self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1);
2484 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2485 if (self == peer) {
/* The last P_SYNC_UUID did not get through. Undo the modifications
we made to our UUIDs when we last started a resync as sync source. */
2488
2489 if (mdev->agreed_pro_version < 91)
2490 return -1001;
2491
2492 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2493 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2494
2495 dev_info(DEV, "Undid last start of resync:\n");
2496
2497 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2498 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2499
2500 return 1;
2501 }
2502 }
2503
2504
2505 *rule_nr = 80;
2506 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2507 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2508 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2509 if (self == peer)
2510 return 2;
2511 }
2512
2513 *rule_nr = 90;
2514 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2515 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2516 if (self == peer && self != ((u64)0))
2517 return 100;
2518
2519 *rule_nr = 100;
2520 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2521 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2522 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2523 peer = mdev->p_uuid[j] & ~((u64)1);
2524 if (self == peer)
2525 return -100;
2526 }
2527 }
2528
2529 return -1000;
2530 }
2531
/* drbd_sync_handshake() returns the new conn state on success, or
C_MASK on failure.
*/
2535 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2536 enum drbd_disk_state peer_disk) __must_hold(local)
2537 {
2538 int hg, rule_nr;
2539 enum drbd_conns rv = C_MASK;
2540 enum drbd_disk_state mydisk;
2541
2542 mydisk = mdev->state.disk;
2543 if (mydisk == D_NEGOTIATING)
2544 mydisk = mdev->new_state_tmp.disk;
2545
2546 dev_info(DEV, "drbd_sync_handshake:\n");
2547 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2548 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2549 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2550
2551 hg = drbd_uuid_compare(mdev, &rule_nr);
2552
2553 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2554
2555 if (hg == -1000) {
2556 dev_alert(DEV, "Unrelated data, aborting!\n");
2557 return C_MASK;
2558 }
2559 if (hg == -1001) {
2560 dev_alert(DEV, "To resolve this both sides have to support at least protocol\n");
2561 return C_MASK;
2562 }
2563
2564 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2565 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2566 int f = (hg == -100) || abs(hg) == 2;
2567 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2568 if (f)
2569 hg = hg*2;
2570 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2571 hg > 0 ? "source" : "target");
2572 }
2573
2574 if (abs(hg) == 100)
2575 drbd_khelper(mdev, "initial-split-brain");
2576
2577 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2578 int pcount = (mdev->state.role == R_PRIMARY)
2579 + (peer_role == R_PRIMARY);
2580 int forced = (hg == -100);
2581
2582 switch (pcount) {
2583 case 0:
2584 hg = drbd_asb_recover_0p(mdev);
2585 break;
2586 case 1:
2587 hg = drbd_asb_recover_1p(mdev);
2588 break;
2589 case 2:
2590 hg = drbd_asb_recover_2p(mdev);
2591 break;
2592 }
2593 if (abs(hg) < 100) {
2594 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2595 "automatically solved. Sync from %s node\n",
2596 pcount, (hg < 0) ? "peer" : "this");
2597 if (forced) {
2598 dev_warn(DEV, "Doing a full sync, since"
2599 " UUIDs where ambiguous.\n");
2600 hg = hg*2;
2601 }
2602 }
2603 }
2604
2605 if (hg == -100) {
2606 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2607 hg = -1;
2608 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2609 hg = 1;
2610
2611 if (abs(hg) < 100)
2612 dev_warn(DEV, "Split-Brain detected, manually solved. "
2613 "Sync from %s node\n",
2614 (hg < 0) ? "peer" : "this");
2615 }
2616
2617 if (hg == -100) {
2618 /* FIXME this log message is not correct if we end up here
2619 * after an attempted attach on a diskless node.
2620 * We just refuse to attach -- well, we drop the "connection"
2621 * to that disk, in a way... */
2622 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2623 drbd_khelper(mdev, "split-brain");
2624 return C_MASK;
2625 }
2626
2627 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2628 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2629 return C_MASK;
2630 }
2631
2632 if (hg < 0 && /* by intention we do not use mydisk here. */
2633 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2634 switch (mdev->net_conf->rr_conflict) {
2635 case ASB_CALL_HELPER:
2636 drbd_khelper(mdev, "pri-lost");
2637 /* fall through */
2638 case ASB_DISCONNECT:
2639 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2640 return C_MASK;
2641 case ASB_VIOLENTLY:
2642 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2643 "assumption\n");
2644 }
2645 }
2646
2647 if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
2648 if (hg == 0)
2649 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2650 else
2651 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2652 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2653 abs(hg) >= 2 ? "full" : "bit-map based");
2654 return C_MASK;
2655 }
2656
2657 if (abs(hg) >= 2) {
2658 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2659 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake"))
2660 return C_MASK;
2661 }
2662
2663 if (hg > 0) { /* become sync source. */
2664 rv = C_WF_BITMAP_S;
2665 } else if (hg < 0) { /* become sync target */
2666 rv = C_WF_BITMAP_T;
2667 } else {
2668 rv = C_CONNECTED;
2669 if (drbd_bm_total_weight(mdev)) {
2670 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2671 drbd_bm_total_weight(mdev));
2672 }
2673 }
2674
2675 return rv;
2676 }
2677
2678 /* returns 1 if invalid */
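/* e.g. peer=discard-remote with self=discard-local is the one valid
* asymmetric pair; peer=discard-remote with self=discard-remote would
* let both nodes keep their own data and is therefore rejected. */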
2679 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2680 {
2681 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2682 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2683 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2684 return 0;
2685
2686 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2687 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2688 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2689 return 1;
2690
2691 /* everything else is valid if they are equal on both sides. */
2692 if (peer == self)
2693 return 0;
2694
/* everything else is invalid. */
2696 return 1;
2697 }
2698
2699 static int receive_protocol(struct drbd_conf *mdev, struct p_header *h)
2700 {
2701 struct p_protocol *p = (struct p_protocol *)h;
2702 int header_size, data_size;
2703 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2704 int p_want_lose, p_two_primaries, cf;
2705 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2706
2707 header_size = sizeof(*p) - sizeof(*h);
2708 data_size = h->length - header_size;
2709
2710 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2711 return FALSE;
2712
2713 p_proto = be32_to_cpu(p->protocol);
2714 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2715 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2716 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2717 p_two_primaries = be32_to_cpu(p->two_primaries);
2718 cf = be32_to_cpu(p->conn_flags);
2719 p_want_lose = cf & CF_WANT_LOSE;
2720
2721 clear_bit(CONN_DRY_RUN, &mdev->flags);
2722
2723 if (cf & CF_DRY_RUN)
2724 set_bit(CONN_DRY_RUN, &mdev->flags);
2725
2726 if (p_proto != mdev->net_conf->wire_protocol) {
2727 dev_err(DEV, "incompatible communication protocols\n");
2728 goto disconnect;
2729 }
2730
2731 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2732 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2733 goto disconnect;
2734 }
2735
2736 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2737 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2738 goto disconnect;
2739 }
2740
2741 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2742 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2743 goto disconnect;
2744 }
2745
2746 if (p_want_lose && mdev->net_conf->want_lose) {
2747 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2748 goto disconnect;
2749 }
2750
2751 if (p_two_primaries != mdev->net_conf->two_primaries) {
2752 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2753 goto disconnect;
2754 }
2755
2756 if (mdev->agreed_pro_version >= 87) {
2757 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2758
2759 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2760 return FALSE;
2761
2762 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2763 if (strcmp(p_integrity_alg, my_alg)) {
2764 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2765 goto disconnect;
2766 }
2767 dev_info(DEV, "data-integrity-alg: %s\n",
2768 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2769 }
2770
2771 return TRUE;
2772
2773 disconnect:
2774 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2775 return FALSE;
2776 }
2777
2778 /* helper function
2779 * input: alg name, feature name
2780 * return: NULL (alg name was "")
2781 * ERR_PTR(error) if something goes wrong
2782 * or the crypto hash ptr, if it worked out ok. */
2783 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2784 const char *alg, const char *name)
2785 {
2786 struct crypto_hash *tfm;
2787
2788 if (!alg[0])
2789 return NULL;
2790
2791 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2792 if (IS_ERR(tfm)) {
2793 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2794 alg, name, PTR_ERR(tfm));
2795 return tfm;
2796 }
2797 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2798 crypto_free_hash(tfm);
2799 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2800 return ERR_PTR(-EINVAL);
2801 }
2802 return tfm;
2803 }
2804
2805 static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h)
2806 {
2807 int ok = TRUE;
2808 struct p_rs_param_89 *p = (struct p_rs_param_89 *)h;
2809 unsigned int header_size, data_size, exp_max_sz;
2810 struct crypto_hash *verify_tfm = NULL;
2811 struct crypto_hash *csums_tfm = NULL;
2812 const int apv = mdev->agreed_pro_version;
2813
2814 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2815 : apv == 88 ? sizeof(struct p_rs_param)
2816 + SHARED_SECRET_MAX
2817 : /* 89 */ sizeof(struct p_rs_param_89);
2818
2819 if (h->length > exp_max_sz) {
2820 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2821 h->length, exp_max_sz);
2822 return FALSE;
2823 }
2824
2825 if (apv <= 88) {
2826 header_size = sizeof(struct p_rs_param) - sizeof(*h);
2827 data_size = h->length - header_size;
2828 } else /* apv >= 89 */ {
2829 header_size = sizeof(struct p_rs_param_89) - sizeof(*h);
2830 data_size = h->length - header_size;
2831 D_ASSERT(data_size == 0);
2832 }
2833
2834 /* initialize verify_alg and csums_alg */
2835 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2836
2837 if (drbd_recv(mdev, h->payload, header_size) != header_size)
2838 return FALSE;
2839
2840 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2841
2842 if (apv >= 88) {
2843 if (apv == 88) {
2844 if (data_size > SHARED_SECRET_MAX) {
2845 dev_err(DEV, "verify-alg too long, "
2846 "peer wants %u, accepting only %u byte\n",
2847 data_size, SHARED_SECRET_MAX);
2848 return FALSE;
2849 }
2850
2851 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2852 return FALSE;
2853
/* we expect a NUL terminated string */
2855 /* but just in case someone tries to be evil */
2856 D_ASSERT(p->verify_alg[data_size-1] == 0);
2857 p->verify_alg[data_size-1] = 0;
2858
2859 } else /* apv >= 89 */ {
2860 /* we still expect NUL terminated strings */
2861 /* but just in case someone tries to be evil */
2862 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2863 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2864 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2865 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2866 }
2867
2868 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2869 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2870 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2871 mdev->sync_conf.verify_alg, p->verify_alg);
2872 goto disconnect;
2873 }
2874 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2875 p->verify_alg, "verify-alg");
2876 if (IS_ERR(verify_tfm)) {
2877 verify_tfm = NULL;
2878 goto disconnect;
2879 }
2880 }
2881
2882 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2883 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2884 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2885 mdev->sync_conf.csums_alg, p->csums_alg);
2886 goto disconnect;
2887 }
2888 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2889 p->csums_alg, "csums-alg");
2890 if (IS_ERR(csums_tfm)) {
2891 csums_tfm = NULL;
2892 goto disconnect;
2893 }
2894 }
2895
2896
2897 spin_lock(&mdev->peer_seq_lock);
2898 /* lock against drbd_nl_syncer_conf() */
2899 if (verify_tfm) {
2900 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2901 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2902 crypto_free_hash(mdev->verify_tfm);
2903 mdev->verify_tfm = verify_tfm;
2904 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2905 }
2906 if (csums_tfm) {
2907 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2908 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2909 crypto_free_hash(mdev->csums_tfm);
2910 mdev->csums_tfm = csums_tfm;
2911 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2912 }
2913 spin_unlock(&mdev->peer_seq_lock);
2914 }
2915
2916 return ok;
2917 disconnect:
2918 /* just for completeness: actually not needed,
2919 * as this is not reached if csums_tfm was ok. */
2920 crypto_free_hash(csums_tfm);
2921 /* but free the verify_tfm again, if csums_tfm did not work out */
2922 crypto_free_hash(verify_tfm);
2923 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2924 return FALSE;
2925 }
2926
2927 static void drbd_setup_order_type(struct drbd_conf *mdev, int peer)
2928 {
2929 /* sorry, we currently have no working implementation
2930 * of distributed TCQ */
2931 }
2932
2933 /* warn if the arguments differ by more than 12.5% */
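/* (d > a>>3 compares the difference against a/8 = 12.5%; e.g. 1200 vs.
* 1000 sectors differ by 200 > 1200/8 = 150 and would be reported.) */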
2934 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2935 const char *s, sector_t a, sector_t b)
2936 {
2937 sector_t d;
2938 if (a == 0 || b == 0)
2939 return;
2940 d = (a > b) ? (a - b) : (b - a);
2941 if (d > (a>>3) || d > (b>>3))
2942 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2943 (unsigned long long)a, (unsigned long long)b);
2944 }
2945
2946 static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
2947 {
2948 struct p_sizes *p = (struct p_sizes *)h;
2949 enum determine_dev_size dd = unchanged;
2950 unsigned int max_seg_s;
2951 sector_t p_size, p_usize, my_usize;
2952 int ldsc = 0; /* local disk size changed */
2953 enum dds_flags ddsf;
2954
2955 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
2956 if (drbd_recv(mdev, h->payload, h->length) != h->length)
2957 return FALSE;
2958
2959 p_size = be64_to_cpu(p->d_size);
2960 p_usize = be64_to_cpu(p->u_size);
2961
2962 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2963 dev_err(DEV, "some backing storage is needed\n");
2964 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2965 return FALSE;
2966 }
2967
2968 /* just store the peer's disk size for now.
2969 * we still need to figure out whether we accept that. */
2970 mdev->p_size = p_size;
2971
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min((l), (r))))
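/* e.g. min_not_zero(0, 4096) == 4096, since 0 means "not configured";
* min_not_zero(2048, 4096) == 2048 */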
2973 if (get_ldev(mdev)) {
2974 warn_if_differ_considerably(mdev, "lower level device sizes",
2975 p_size, drbd_get_max_capacity(mdev->ldev));
2976 warn_if_differ_considerably(mdev, "user requested size",
2977 p_usize, mdev->ldev->dc.disk_size);
2978
2979 /* if this is the first connect, or an otherwise expected
2980 * param exchange, choose the minimum */
2981 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2982 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
2983 p_usize);
2984
2985 my_usize = mdev->ldev->dc.disk_size;
2986
2987 if (mdev->ldev->dc.disk_size != p_usize) {
2988 mdev->ldev->dc.disk_size = p_usize;
2989 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
2990 (unsigned long)mdev->ldev->dc.disk_size);
2991 }
2992
2993 /* Never shrink a device with usable data during connect.
2994 But allow online shrinking if we are connected. */
2995 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
2996 drbd_get_capacity(mdev->this_bdev) &&
2997 mdev->state.disk >= D_OUTDATED &&
2998 mdev->state.conn < C_CONNECTED) {
2999 dev_err(DEV, "The peer's disk size is too small!\n");
3000 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3001 mdev->ldev->dc.disk_size = my_usize;
3002 put_ldev(mdev);
3003 return FALSE;
3004 }
3005 put_ldev(mdev);
3006 }
3007 #undef min_not_zero
3008
3009 ddsf = be16_to_cpu(p->dds_flags);
3010 if (get_ldev(mdev)) {
3011 dd = drbd_determin_dev_size(mdev, ddsf);
3012 put_ldev(mdev);
3013 if (dd == dev_size_error)
3014 return FALSE;
3015 drbd_md_sync(mdev);
3016 } else {
3017 /* I am diskless, need to accept the peer's size. */
3018 drbd_set_my_capacity(mdev, p_size);
3019 }
3020
3021 if (get_ldev(mdev)) {
3022 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3023 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3024 ldsc = 1;
3025 }
3026
3027 if (mdev->agreed_pro_version < 94)
3028 max_seg_s = be32_to_cpu(p->max_segment_size);
3029 else /* drbd 8.3.8 onwards */
3030 max_seg_s = DRBD_MAX_SEGMENT_SIZE;
3031
3032 if (max_seg_s != queue_max_segment_size(mdev->rq_queue))
3033 drbd_setup_queue_param(mdev, max_seg_s);
3034
3035 drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type));
3036 put_ldev(mdev);
3037 }
3038
3039 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3040 if (be64_to_cpu(p->c_size) !=
3041 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3042 /* we have different sizes, probably peer
3043 * needs to know my new size... */
3044 drbd_send_sizes(mdev, 0, ddsf);
3045 }
3046 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3047 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3048 if (mdev->state.pdsk >= D_INCONSISTENT &&
3049 mdev->state.disk >= D_INCONSISTENT) {
3050 if (ddsf & DDSF_NO_RESYNC)
3051 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3052 else
3053 resync_after_online_grow(mdev);
3054 } else
3055 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3056 }
3057 }
3058
3059 return TRUE;
3060 }
3061
3062 static int receive_uuids(struct drbd_conf *mdev, struct p_header *h)
3063 {
3064 struct p_uuids *p = (struct p_uuids *)h;
3065 u64 *p_uuid;
3066 int i;
3067
3068 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3069 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3070 return FALSE;
3071
p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
if (!p_uuid) {
dev_err(DEV, "kmalloc of p_uuid failed\n");
return FALSE;
}
3073
3074 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3075 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3076
3077 kfree(mdev->p_uuid);
3078 mdev->p_uuid = p_uuid;
3079
3080 if (mdev->state.conn < C_CONNECTED &&
3081 mdev->state.disk < D_INCONSISTENT &&
3082 mdev->state.role == R_PRIMARY &&
3083 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3084 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3085 (unsigned long long)mdev->ed_uuid);
3086 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3087 return FALSE;
3088 }
3089
3090 if (get_ldev(mdev)) {
3091 int skip_initial_sync =
3092 mdev->state.conn == C_CONNECTED &&
3093 mdev->agreed_pro_version >= 90 &&
3094 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3095 (p_uuid[UI_FLAGS] & 8);
3096 if (skip_initial_sync) {
3097 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3098 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3099 "clear_n_write from receive_uuids");
3100 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3101 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3102 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3103 CS_VERBOSE, NULL);
3104 drbd_md_sync(mdev);
3105 }
3106 put_ldev(mdev);
3107 } else if (mdev->state.disk < D_INCONSISTENT &&
3108 mdev->state.role == R_PRIMARY) {
3109 /* I am a diskless primary, the peer just created a new current UUID
3110 for me. */
3111 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3112 }
3113
/* Before we test for the disk state, we should wait until a possibly
ongoing cluster wide state change has finished. That is important if
we are primary and are detaching from our disk: we need to see the
new disk state... */
3118 wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
3119 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3120 drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3121
3122 return TRUE;
3123 }
3124
3125 /**
3126 * convert_state() - Converts the peer's view of the cluster state to our point of view
3127 * @ps: The state as seen by the peer.
3128 */
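/* Example: a peer reporting conn=C_STARTING_SYNC_S, role=Primary,
* disk=UpToDate, pdsk=Inconsistent reads, from our point of view, as
* conn=C_STARTING_SYNC_T, peer=Primary, pdsk=UpToDate,
* disk=Inconsistent. */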
3129 static union drbd_state convert_state(union drbd_state ps)
3130 {
3131 union drbd_state ms;
3132
3133 static enum drbd_conns c_tab[] = {
3134 [C_CONNECTED] = C_CONNECTED,
3135
3136 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3137 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3138 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3139 [C_VERIFY_S] = C_VERIFY_T,
3140 [C_MASK] = C_MASK,
3141 };
3142
3143 ms.i = ps.i;
3144
3145 ms.conn = c_tab[ps.conn];
3146 ms.peer = ps.role;
3147 ms.role = ps.peer;
3148 ms.pdsk = ps.disk;
3149 ms.disk = ps.pdsk;
3150 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3151
3152 return ms;
3153 }
3154
3155 static int receive_req_state(struct drbd_conf *mdev, struct p_header *h)
3156 {
3157 struct p_req_state *p = (struct p_req_state *)h;
3158 union drbd_state mask, val;
3159 int rv;
3160
3161 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3162 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3163 return FALSE;
3164
3165 mask.i = be32_to_cpu(p->mask);
3166 val.i = be32_to_cpu(p->val);
3167
3168 if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
3169 test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
3170 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3171 return TRUE;
3172 }
3173
3174 mask = convert_state(mask);
3175 val = convert_state(val);
3176
3177 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3178
3179 drbd_send_sr_reply(mdev, rv);
3180 drbd_md_sync(mdev);
3181
3182 return TRUE;
3183 }
3184
3185 static int receive_state(struct drbd_conf *mdev, struct p_header *h)
3186 {
3187 struct p_state *p = (struct p_state *)h;
3188 enum drbd_conns nconn, oconn;
3189 union drbd_state ns, peer_state;
3190 enum drbd_disk_state real_peer_disk;
3191 int rv;
3192
3193 ERR_IF(h->length != (sizeof(*p)-sizeof(*h)))
3194 return FALSE;
3195
3196 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3197 return FALSE;
3198
3199 peer_state.i = be32_to_cpu(p->state);
3200
3201 real_peer_disk = peer_state.disk;
3202 if (peer_state.disk == D_NEGOTIATING) {
3203 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3204 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3205 }
3206
3207 spin_lock_irq(&mdev->req_lock);
3208 retry:
3209 oconn = nconn = mdev->state.conn;
3210 spin_unlock_irq(&mdev->req_lock);
3211
3212 if (nconn == C_WF_REPORT_PARAMS)
3213 nconn = C_CONNECTED;
3214
3215 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3216 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3217 int cr; /* consider resync */
3218
3219 /* if we established a new connection */
3220 cr = (oconn < C_CONNECTED);
3221 /* if we had an established connection
3222 * and one of the nodes newly attaches a disk */
3223 cr |= (oconn == C_CONNECTED &&
3224 (peer_state.disk == D_NEGOTIATING ||
3225 mdev->state.disk == D_NEGOTIATING));
3226 /* if we have both been inconsistent, and the peer has been
3227 * forced to be UpToDate with --overwrite-data */
3228 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3229 /* if we had been plain connected, and the admin requested to
3230 * start a sync by "invalidate" or "invalidate-remote" */
3231 cr |= (oconn == C_CONNECTED &&
3232 (peer_state.conn >= C_STARTING_SYNC_S &&
3233 peer_state.conn <= C_WF_BITMAP_T));
3234
3235 if (cr)
3236 nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3237
3238 put_ldev(mdev);
3239 if (nconn == C_MASK) {
3240 nconn = C_CONNECTED;
3241 if (mdev->state.disk == D_NEGOTIATING) {
3242 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3243 } else if (peer_state.disk == D_NEGOTIATING) {
3244 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3245 peer_state.disk = D_DISKLESS;
3246 real_peer_disk = D_DISKLESS;
3247 } else {
3248 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
3249 return FALSE;
3250 D_ASSERT(oconn == C_WF_REPORT_PARAMS);
3251 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3252 return FALSE;
3253 }
3254 }
3255 }
3256
3257 spin_lock_irq(&mdev->req_lock);
3258 if (mdev->state.conn != oconn)
3259 goto retry;
3260 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3261 ns.i = mdev->state.i;
3262 ns.conn = nconn;
3263 ns.peer = peer_state.role;
3264 ns.pdsk = real_peer_disk;
3265 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3266 if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3267 ns.disk = mdev->new_state_tmp.disk;
3268
3269 rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL);
3270 ns = mdev->state;
3271 spin_unlock_irq(&mdev->req_lock);
3272
3273 if (rv < SS_SUCCESS) {
3274 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3275 return FALSE;
3276 }
3277
3278 if (oconn > C_WF_REPORT_PARAMS) {
3279 if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
peer_state.disk != D_NEGOTIATING) {
3281 /* we want resync, peer has not yet decided to sync... */
3282 /* Nowadays only used when forcing a node into primary role and
3283 setting its disk to UpToDate with that */
3284 drbd_send_uuids(mdev);
3285 drbd_send_state(mdev);
3286 }
3287 }
3288
3289 mdev->net_conf->want_lose = 0;
3290
3291 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3292
3293 return TRUE;
3294 }
3295
3296 static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h)
3297 {
3298 struct p_rs_uuid *p = (struct p_rs_uuid *)h;
3299
3300 wait_event(mdev->misc_wait,
3301 mdev->state.conn == C_WF_SYNC_UUID ||
3302 mdev->state.conn < C_CONNECTED ||
3303 mdev->state.disk < D_NEGOTIATING);
3304
3305 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3306
3307 ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE;
3308 if (drbd_recv(mdev, h->payload, h->length) != h->length)
3309 return FALSE;
3310
3311 /* Here the _drbd_uuid_ functions are right, current should
3312 _not_ be rotated into the history */
3313 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3314 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3315 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3316
3317 drbd_start_resync(mdev, C_SYNC_TARGET);
3318
3319 put_ldev(mdev);
3320 } else
3321 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3322
3323 return TRUE;
3324 }
3325
3326 enum receive_bitmap_ret { OK, DONE, FAILED };
3327
3328 static enum receive_bitmap_ret
3329 receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h,
3330 unsigned long *buffer, struct bm_xfer_ctx *c)
3331 {
3332 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3333 unsigned want = num_words * sizeof(long);
3334
3335 if (want != h->length) {
3336 dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length);
3337 return FAILED;
3338 }
3339 if (want == 0)
3340 return DONE;
3341 if (drbd_recv(mdev, buffer, want) != want)
3342 return FAILED;
3343
3344 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3345
3346 c->word_offset += num_words;
3347 c->bit_offset = c->word_offset * BITS_PER_LONG;
3348 if (c->bit_offset > c->bm_bits)
3349 c->bit_offset = c->bm_bits;
3350
3351 return OK;
3352 }
3353
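/* The compressed bitmap is a stream of variable-length-integer (VLI)
* encoded run lengths, alternating between runs of clear and of set
* bits; DCBP_get_start() says whether the first run is a "set" run.
* E.g. start=0 with run lengths 5, 3, 7 decodes to 5 clear bits, then
* 3 set bits (applied via _drbd_bm_set_bits), then 7 clear bits. */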
3354 static enum receive_bitmap_ret
3355 recv_bm_rle_bits(struct drbd_conf *mdev,
3356 struct p_compressed_bm *p,
3357 struct bm_xfer_ctx *c)
3358 {
3359 struct bitstream bs;
3360 u64 look_ahead;
3361 u64 rl;
3362 u64 tmp;
3363 unsigned long s = c->bit_offset;
3364 unsigned long e;
3365 int len = p->head.length - (sizeof(*p) - sizeof(p->head));
3366 int toggle = DCBP_get_start(p);
3367 int have;
3368 int bits;
3369
3370 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3371
3372 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3373 if (bits < 0)
3374 return FAILED;
3375
3376 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3377 bits = vli_decode_bits(&rl, look_ahead);
3378 if (bits <= 0)
3379 return FAILED;
3380
3381 if (toggle) {
e = s + rl - 1;
3383 if (e >= c->bm_bits) {
3384 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3385 return FAILED;
3386 }
3387 _drbd_bm_set_bits(mdev, s, e);
3388 }
3389
3390 if (have < bits) {
3391 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3392 have, bits, look_ahead,
3393 (unsigned int)(bs.cur.b - p->code),
3394 (unsigned int)bs.buf_len);
3395 return FAILED;
3396 }
3397 look_ahead >>= bits;
3398 have -= bits;
3399
3400 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3401 if (bits < 0)
3402 return FAILED;
3403 look_ahead |= tmp << have;
3404 have += bits;
3405 }
3406
3407 c->bit_offset = s;
3408 bm_xfer_ctx_bit_to_word_offset(c);
3409
3410 return (s == c->bm_bits) ? DONE : OK;
3411 }
3412
3413 static enum receive_bitmap_ret
3414 decode_bitmap_c(struct drbd_conf *mdev,
3415 struct p_compressed_bm *p,
3416 struct bm_xfer_ctx *c)
3417 {
3418 if (DCBP_get_code(p) == RLE_VLI_Bits)
3419 return recv_bm_rle_bits(mdev, p, c);
3420
3421 /* other variants had been implemented for evaluation,
3422 * but have been dropped as this one turned out to be "best"
3423 * during all our tests. */
3424
3425 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3426 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3427 return FAILED;
3428 }
3429
3430 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3431 const char *direction, struct bm_xfer_ctx *c)
3432 {
3433 /* what would it take to transfer it "plaintext" */
3434 unsigned plain = sizeof(struct p_header) *
3435 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3436 + c->bm_words * sizeof(long);
3437 unsigned total = c->bytes[0] + c->bytes[1];
3438 unsigned r;
3439
3440 /* total can not be zero. but just in case: */
3441 if (total == 0)
3442 return;
3443
3444 /* don't report if not compressed */
3445 if (total >= plain)
3446 return;
3447
3448 /* total < plain. check for overflow, still */
3449 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3450 : (1000 * total / plain);
3451
3452 if (r > 1000)
3453 r = 1000;
3454
3455 r = 1000 - r;
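/* e.g. total = 300 bytes vs. plain = 1200 gives r = 1000 - 250 = 750,
* printed below as "compression: 75.0%" */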
3456 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3457 "total %u; compression: %u.%u%%\n",
3458 direction,
3459 c->bytes[1], c->packets[1],
3460 c->bytes[0], c->packets[0],
3461 total, r/10, r % 10);
3462 }
3463
/* Since we are processing the bitfield from lower addresses to higher,
it does not matter whether we process it in 32 bit or 64 bit chunks,
as long as it is little endian. (Understand it as a byte stream,
beginning with the lowest byte...) If we used big endian, we would
need to process it from the highest address to the lowest in order
to be agnostic to the 32 vs. 64 bit issue.

returns 0 on failure, 1 if we successfully received it. */
3472 static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
3473 {
3474 struct bm_xfer_ctx c;
3475 void *buffer;
3476 enum receive_bitmap_ret ret;
3477 int ok = FALSE;
3478
3479 wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
3480
3481 drbd_bm_lock(mdev, "receive bitmap");
3482
3483 /* maybe we should use some per thread scratch page,
3484 * and allocate that during initial device creation? */
3485 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3486 if (!buffer) {
3487 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3488 goto out;
3489 }
3490
3491 c = (struct bm_xfer_ctx) {
3492 .bm_bits = drbd_bm_bits(mdev),
3493 .bm_words = drbd_bm_words(mdev),
3494 };
3495
3496 do {
3497 if (h->command == P_BITMAP) {
3498 ret = receive_bitmap_plain(mdev, h, buffer, &c);
3499 } else if (h->command == P_COMPRESSED_BITMAP) {
3500 /* MAYBE: sanity check that we speak proto >= 90,
3501 * and the feature is enabled! */
3502 struct p_compressed_bm *p;
3503
3504 if (h->length > BM_PACKET_PAYLOAD_BYTES) {
3505 dev_err(DEV, "ReportCBitmap packet too large\n");
3506 goto out;
3507 }
/* use the page buffer */
3509 p = buffer;
3510 memcpy(p, h, sizeof(*h));
3511 if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
3512 goto out;
3513 if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
3514 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
goto out;
3516 }
3517 ret = decode_bitmap_c(mdev, p, &c);
3518 } else {
3519 dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command);
3520 goto out;
3521 }
3522
3523 c.packets[h->command == P_BITMAP]++;
3524 c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;
3525
3526 if (ret != OK)
3527 break;
3528
3529 if (!drbd_recv_header(mdev, h))
3530 goto out;
3531 } while (ret == OK);
3532 if (ret == FAILED)
3533 goto out;
3534
3535 INFO_bm_xfer_stats(mdev, "receive", &c);
3536
3537 if (mdev->state.conn == C_WF_BITMAP_T) {
3538 ok = !drbd_send_bitmap(mdev);
3539 if (!ok)
3540 goto out;
3541 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3542 ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3543 D_ASSERT(ok == SS_SUCCESS);
3544 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3545 /* admin may have requested C_DISCONNECTING,
3546 * other threads may have noticed network errors */
3547 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3548 drbd_conn_str(mdev->state.conn));
3549 }
3550
3551 ok = TRUE;
3552 out:
3553 drbd_bm_unlock(mdev);
3554 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3555 drbd_start_resync(mdev, C_SYNC_SOURCE);
3556 free_page((unsigned long) buffer);
3557 return ok;
3558 }
3559
3560 static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent)
3561 {
3562 /* TODO zero copy sink :) */
3563 static char sink[128];
3564 int size, want, r;
3565
3566 if (!silent)
3567 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3568 h->command, h->length);
3569
3570 size = h->length;
3571 while (size > 0) {
3572 want = min_t(int, size, sizeof(sink));
3573 r = drbd_recv(mdev, sink, want);
3574 ERR_IF(r <= 0) break;
3575 size -= r;
3576 }
3577 return size == 0;
3578 }
3579
3580 static int receive_skip(struct drbd_conf *mdev, struct p_header *h)
3581 {
3582 return receive_skip_(mdev, h, 0);
3583 }
3584
3585 static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h)
3586 {
3587 return receive_skip_(mdev, h, 1);
3588 }
3589
3590 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h)
3591 {
3592 if (mdev->state.disk >= D_INCONSISTENT)
3593 drbd_kick_lo(mdev);
3594
3595 /* Make sure we've acked all the TCP data associated
3596 * with the data requests being unplugged */
3597 drbd_tcp_quickack(mdev->data.socket);
3598
3599 return TRUE;
3600 }
3601
3602 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *);
3603
3604 static drbd_cmd_handler_f drbd_default_handler[] = {
3605 [P_DATA] = receive_Data,
3606 [P_DATA_REPLY] = receive_DataReply,
3607 [P_RS_DATA_REPLY] = receive_RSDataReply,
3608 [P_BARRIER] = receive_Barrier,
3609 [P_BITMAP] = receive_bitmap,
3610 [P_COMPRESSED_BITMAP] = receive_bitmap,
3611 [P_UNPLUG_REMOTE] = receive_UnplugRemote,
3612 [P_DATA_REQUEST] = receive_DataRequest,
3613 [P_RS_DATA_REQUEST] = receive_DataRequest,
3614 [P_SYNC_PARAM] = receive_SyncParam,
3615 [P_SYNC_PARAM89] = receive_SyncParam,
3616 [P_PROTOCOL] = receive_protocol,
3617 [P_UUIDS] = receive_uuids,
3618 [P_SIZES] = receive_sizes,
3619 [P_STATE] = receive_state,
3620 [P_STATE_CHG_REQ] = receive_req_state,
3621 [P_SYNC_UUID] = receive_sync_uuid,
3622 [P_OV_REQUEST] = receive_DataRequest,
3623 [P_OV_REPLY] = receive_DataRequest,
3624 [P_CSUM_RS_REQUEST] = receive_DataRequest,
3625 [P_DELAY_PROBE] = receive_skip_silent,
3626 /* anything missing from this table is in
3627 * the asender_tbl, see get_asender_cmd */
3628 [P_MAX_CMD] = NULL,
3629 };
3630
3631 static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler;
3632 static drbd_cmd_handler_f *drbd_opt_cmd_handler;
3633
3634 static void drbdd(struct drbd_conf *mdev)
3635 {
3636 drbd_cmd_handler_f handler;
3637 struct p_header *header = &mdev->data.rbuf.header;
3638
3639 while (get_t_state(&mdev->receiver) == Running) {
3640 drbd_thread_current_set_cpu(mdev);
3641 if (!drbd_recv_header(mdev, header)) {
3642 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3643 break;
3644 }
3645
3646 if (header->command < P_MAX_CMD)
3647 handler = drbd_cmd_handler[header->command];
3648 else if (P_MAY_IGNORE < header->command
3649 && header->command < P_MAX_OPT_CMD)
3650 handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE];
3651 else if (header->command > P_MAX_OPT_CMD)
3652 handler = receive_skip;
3653 else
3654 handler = NULL;
3655
3656 if (unlikely(!handler)) {
3657 dev_err(DEV, "unknown packet type %d, l: %d!\n",
3658 header->command, header->length);
3659 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3660 break;
3661 }
3662 if (unlikely(!handler(mdev, header))) {
3663 dev_err(DEV, "error receiving %s, l: %d!\n",
3664 cmdname(header->command), header->length);
3665 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3666 break;
3667 }
3668 }
3669 }
3670
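/* Queue a no-op barrier work item and wait for it: once
* w_prev_work_done has run, every work item queued before this call
* has been processed by the worker thread. */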
3671 void drbd_flush_workqueue(struct drbd_conf *mdev)
3672 {
3673 struct drbd_wq_barrier barr;
3674
3675 barr.w.cb = w_prev_work_done;
3676 init_completion(&barr.done);
3677 drbd_queue_work(&mdev->data.work, &barr.w);
3678 wait_for_completion(&barr.done);
3679 }
3680
3681 static void drbd_disconnect(struct drbd_conf *mdev)
3682 {
3683 enum drbd_fencing_p fp;
3684 union drbd_state os, ns;
3685 int rv = SS_UNKNOWN_ERROR;
3686 unsigned int i;
3687
3688 if (mdev->state.conn == C_STANDALONE)
3689 return;
3690 if (mdev->state.conn >= C_WF_CONNECTION)
3691 dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n",
3692 drbd_conn_str(mdev->state.conn));
3693
3694 /* asender does not clean up anything. it must not interfere, either */
3695 drbd_thread_stop(&mdev->asender);
3696 drbd_free_sock(mdev);
3697
3698 spin_lock_irq(&mdev->req_lock);
3699 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3700 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3701 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3702 spin_unlock_irq(&mdev->req_lock);
3703
3704 /* We do not have data structures that would allow us to
3705 * get the rs_pending_cnt down to 0 again.
3706 * * On C_SYNC_TARGET we do not have any data structures describing
3707 * the pending RSDataRequest's we have sent.
3708 * * On C_SYNC_SOURCE there is no data structure that tracks
3709 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3710 * And no, it is not the sum of the reference counts in the
3711 * resync_LRU. The resync_LRU tracks the whole operation including
3712 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3713 * on the fly. */
3714 drbd_rs_cancel_all(mdev);
3715 mdev->rs_total = 0;
3716 mdev->rs_failed = 0;
3717 atomic_set(&mdev->rs_pending_cnt, 0);
3718 wake_up(&mdev->misc_wait);
3719
3720 /* make sure syncer is stopped and w_resume_next_sg queued */
3721 del_timer_sync(&mdev->resync_timer);
3722 set_bit(STOP_SYNC_TIMER, &mdev->flags);
3723 resync_timer_fn((unsigned long)mdev);
3724
3725 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3726 * w_make_resync_request etc. which may still be on the worker queue
3727 * to be "canceled" */
3728 drbd_flush_workqueue(mdev);
3729
3730 /* This also does reclaim_net_ee(). If we do this too early, we might
3731 * miss some resync ee and pages.*/
3732 drbd_process_done_ee(mdev);
3733
3734 kfree(mdev->p_uuid);
3735 mdev->p_uuid = NULL;
3736
3737 if (!mdev->state.susp)
3738 tl_clear(mdev);
3739
3740 dev_info(DEV, "Connection closed\n");
3741
3742 drbd_md_sync(mdev);
3743
3744 fp = FP_DONT_CARE;
3745 if (get_ldev(mdev)) {
3746 fp = mdev->ldev->dc.fencing;
3747 put_ldev(mdev);
3748 }
3749
3750 if (mdev->state.role == R_PRIMARY) {
3751 if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) {
3752 enum drbd_disk_state nps = drbd_try_outdate_peer(mdev);
3753 drbd_request_state(mdev, NS(pdsk, nps));
3754 }
3755 }
3756
3757 spin_lock_irq(&mdev->req_lock);
3758 os = mdev->state;
3759 if (os.conn >= C_UNCONNECTED) {
3760 /* Do not restart in case we are C_DISCONNECTING */
3761 ns = os;
3762 ns.conn = C_UNCONNECTED;
3763 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3764 }
3765 spin_unlock_irq(&mdev->req_lock);
3766
3767 if (os.conn == C_DISCONNECTING) {
3768 struct hlist_head *h;
3769 wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0);
3770
3771 /* we must not free the tl_hash
3772 * while application io is still on the fly */
3773 wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0);
3774
3775 spin_lock_irq(&mdev->req_lock);
3776 /* paranoia code */
3777 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3778 if (h->first)
3779 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3780 (int)(h - mdev->ee_hash), h->first);
3781 kfree(mdev->ee_hash);
3782 mdev->ee_hash = NULL;
3783 mdev->ee_hash_s = 0;
3784
3785 /* paranoia code */
3786 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
3787 if (h->first)
3788 dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
3789 (int)(h - mdev->tl_hash), h->first);
3790 kfree(mdev->tl_hash);
3791 mdev->tl_hash = NULL;
3792 mdev->tl_hash_s = 0;
3793 spin_unlock_irq(&mdev->req_lock);
3794
3795 crypto_free_hash(mdev->cram_hmac_tfm);
3796 mdev->cram_hmac_tfm = NULL;
3797
3798 kfree(mdev->net_conf);
3799 mdev->net_conf = NULL;
3800 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3801 }
3802
3803 /* tcp_close and release of sendpage pages can be deferred. I don't
3804 * want to use SO_LINGER, because apparently it can be deferred for
3805 * more than 20 seconds (longest time I checked).
3806 *
3807 * Actually we don't care for exactly when the network stack does its
3808 * put_page(), but release our reference on these pages right here.
3809 */
3810 i = drbd_release_ee(mdev, &mdev->net_ee);
3811 if (i)
3812 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3813 i = atomic_read(&mdev->pp_in_use);
3814 if (i)
3815 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3816
3817 D_ASSERT(list_empty(&mdev->read_ee));
3818 D_ASSERT(list_empty(&mdev->active_ee));
3819 D_ASSERT(list_empty(&mdev->sync_ee));
3820 D_ASSERT(list_empty(&mdev->done_ee));
3821
3822 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3823 atomic_set(&mdev->current_epoch->epoch_size, 0);
3824 D_ASSERT(list_empty(&mdev->current_epoch->list));
3825 }
3826
3827 /*
3828 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3829 * we can agree on is stored in agreed_pro_version.
3830 *
3831 * feature flags and the reserved array should be enough room for future
3832 * enhancements of the handshake protocol, and possible plugins...
3833 *
3834 * for now, they are expected to be zero, but ignored.
3835 */
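/* Each side sends its own [PRO_VERSION_MIN, PRO_VERSION_MAX] range;
 * both then compute the same agreed version in drbd_do_handshake(). */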
3836 static int drbd_send_handshake(struct drbd_conf *mdev)
3837 {
3838 /* ASSERT current == mdev->receiver ... */
3839 struct p_handshake *p = &mdev->data.sbuf.handshake;
3840 int ok;
3841
3842 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3843 dev_err(DEV, "interrupted during initial handshake\n");
3844 return 0; /* interrupted. not ok. */
3845 }
3846
3847 if (mdev->data.socket == NULL) {
3848 mutex_unlock(&mdev->data.mutex);
3849 return 0;
3850 }
3851
3852 memset(p, 0, sizeof(*p));
3853 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3854 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3855 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_HAND_SHAKE,
3856 (struct p_header *)p, sizeof(*p), 0);
3857 mutex_unlock(&mdev->data.mutex);
3858 return ok;
3859 }
3860
3861 /*
3862 * return values:
3863 * 1 yes, we have a valid connection
3864 * 0 oops, did not work out, please try again
3865 * -1 peer talks different language,
3866 * no point in trying again, please go standalone.
3867 */
3868 static int drbd_do_handshake(struct drbd_conf *mdev)
3869 {
3870 /* ASSERT current == mdev->receiver ... */
3871 struct p_handshake *p = &mdev->data.rbuf.handshake;
3872 const int expect = sizeof(struct p_handshake)
3873 -sizeof(struct p_header);
3874 int rv;
3875
3876 rv = drbd_send_handshake(mdev);
3877 if (!rv)
3878 return 0;
3879
3880 rv = drbd_recv_header(mdev, &p->head);
3881 if (!rv)
3882 return 0;
3883
3884 if (p->head.command != P_HAND_SHAKE) {
3885 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
3886 cmdname(p->head.command), p->head.command);
3887 return -1;
3888 }
3889
3890 if (p->head.length != expect) {
3891 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
3892 expect, p->head.length);
3893 return -1;
3894 }
3895
3896 rv = drbd_recv(mdev, &p->head.payload, expect);
3897
3898 if (rv != expect) {
3899 dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
3900 return 0;
3901 }
3902
3903 p->protocol_min = be32_to_cpu(p->protocol_min);
3904 p->protocol_max = be32_to_cpu(p->protocol_max);
3905 if (p->protocol_max == 0)
3906 p->protocol_max = p->protocol_min;
3907
3908 if (PRO_VERSION_MAX < p->protocol_min ||
3909 PRO_VERSION_MIN > p->protocol_max)
3910 goto incompat;
3911
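	/* The ranges overlap, so the highest version both sides support is
	 * min(our max, peer's max).  Illustrative example only: if we offer
	 * 86..91 and the peer offers 89..94, we agree on 91. */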
3912 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3913
3914 dev_info(DEV, "Handshake successful: "
3915 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3916
3917 return 1;
3918
3919 incompat:
3920 dev_err(DEV, "incompatible DRBD dialects: "
3921 "I support %d-%d, peer supports %d-%d\n",
3922 PRO_VERSION_MIN, PRO_VERSION_MAX,
3923 p->protocol_min, p->protocol_max);
3924 return -1;
3925 }
3926
3927 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3928 static int drbd_do_auth(struct drbd_conf *mdev)
3929 {
3930 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
3931 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3932 return -1;
3933 }
3934 #else
3935 #define CHALLENGE_LEN 64
3936
3937 /* Return value:
3938 1 - auth succeeded,
3939 0 - failed, try again (network error),
3940 -1 - auth failed, don't try again.
3941 */
3942
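/*
 * Symmetric CRAM-HMAC exchange, as implemented below: send our random
 * challenge, receive the peer's challenge, answer it with
 * HMAC(shared_secret, peers_ch), then receive the peer's answer and
 * compare it to the locally computed HMAC(shared_secret, my_challenge).
 * Both nodes run the same protocol concurrently.
 */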
3943 static int drbd_do_auth(struct drbd_conf *mdev)
3944 {
3945 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
3946 struct scatterlist sg;
3947 char *response = NULL;
3948 char *right_response = NULL;
3949 char *peers_ch = NULL;
3950 struct p_header p;
3951 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
3952 unsigned int resp_size;
3953 struct hash_desc desc;
3954 int rv;
3955
3956 desc.tfm = mdev->cram_hmac_tfm;
3957 desc.flags = 0;
3958
3959 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
3960 (u8 *)mdev->net_conf->shared_secret, key_len);
3961 if (rv) {
3962 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
3963 rv = -1;
3964 goto fail;
3965 }
3966
3967 get_random_bytes(my_challenge, CHALLENGE_LEN);
3968
3969 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
3970 if (!rv)
3971 goto fail;
3972
3973 rv = drbd_recv_header(mdev, &p);
3974 if (!rv)
3975 goto fail;
3976
3977 if (p.command != P_AUTH_CHALLENGE) {
3978 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
3979 cmdname(p.command), p.command);
3980 rv = 0;
3981 goto fail;
3982 }
3983
3984 if (p.length > CHALLENGE_LEN*2) {
3985 dev_err(DEV, "AuthChallenge payload too big.\n");
3986 rv = -1;
3987 goto fail;
3988 }
3989
3990 peers_ch = kmalloc(p.length, GFP_NOIO);
3991 if (peers_ch == NULL) {
3992 dev_err(DEV, "kmalloc of peers_ch failed\n");
3993 rv = -1;
3994 goto fail;
3995 }
3996
3997 rv = drbd_recv(mdev, peers_ch, p.length);
3998
3999 if (rv != p.length) {
4000 dev_err(DEV, "short read AuthChallenge: l=%u\n", rv);
4001 rv = 0;
4002 goto fail;
4003 }
4004
4005 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4006 response = kmalloc(resp_size, GFP_NOIO);
4007 if (response == NULL) {
4008 dev_err(DEV, "kmalloc of response failed\n");
4009 rv = -1;
4010 goto fail;
4011 }
4012
4013 sg_init_table(&sg, 1);
4014 sg_set_buf(&sg, peers_ch, p.length);
4015
4016 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4017 if (rv) {
4018 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4019 rv = -1;
4020 goto fail;
4021 }
4022
4023 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4024 if (!rv)
4025 goto fail;
4026
4027 rv = drbd_recv_header(mdev, &p);
4028 if (!rv)
4029 goto fail;
4030
4031 if (p.command != P_AUTH_RESPONSE) {
4032 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4033 cmdname(p.command), p.command);
4034 rv = 0;
4035 goto fail;
4036 }
4037
4038 if (p.length != resp_size) {
4039 dev_err(DEV, "AuthResponse payload of wrong size.\n");
4040 rv = 0;
4041 goto fail;
4042 }
4043
4044 rv = drbd_recv(mdev, response, resp_size);
4045
4046 if (rv != resp_size) {
4047 dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4048 rv = 0;
4049 goto fail;
4050 }
4051
4052 right_response = kmalloc(resp_size, GFP_NOIO);
4053 if (right_response == NULL) {
4054 dev_err(DEV, "kmalloc of right_response failed\n");
4055 rv = -1;
4056 goto fail;
4057 }
4058
4059 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4060
4061 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4062 if (rv) {
4063 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4064 rv = -1;
4065 goto fail;
4066 }
4067
4068 rv = !memcmp(response, right_response, resp_size);
4069
4070 if (rv)
4071 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4072 resp_size, mdev->net_conf->cram_hmac_alg);
4073 else
4074 rv = -1;
4075
4076 fail:
4077 kfree(peers_ch);
4078 kfree(response);
4079 kfree(right_response);
4080
4081 return rv;
4082 }
4083 #endif
4084
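/*
 * Receiver thread entry point.  drbd_connect() == 0 means "no connection
 * yet, retry in a second"; -1 means the handshake failed for good, so
 * the network configuration is discarded; > 0 means we are connected and
 * drbdd() processes packets until the connection breaks.
 */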
4085 int drbdd_init(struct drbd_thread *thi)
4086 {
4087 struct drbd_conf *mdev = thi->mdev;
4088 unsigned int minor = mdev_to_minor(mdev);
4089 int h;
4090
4091 sprintf(current->comm, "drbd%d_receiver", minor);
4092
4093 dev_info(DEV, "receiver (re)started\n");
4094
4095 do {
4096 h = drbd_connect(mdev);
4097 if (h == 0) {
4098 drbd_disconnect(mdev);
4099 __set_current_state(TASK_INTERRUPTIBLE);
4100 schedule_timeout(HZ);
4101 }
4102 if (h == -1) {
4103 dev_warn(DEV, "Discarding network configuration.\n");
4104 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4105 }
4106 } while (h == 0);
4107
4108 if (h > 0) {
4109 if (get_net_conf(mdev)) {
4110 drbdd(mdev);
4111 put_net_conf(mdev);
4112 }
4113 }
4114
4115 drbd_disconnect(mdev);
4116
4117 dev_info(DEV, "receiver terminated\n");
4118 return 0;
4119 }
4120
4121 /* ********* acknowledge sender ******** */
4122
4123 static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h)
4124 {
4125 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4126
4127 int retcode = be32_to_cpu(p->retcode);
4128
4129 if (retcode >= SS_SUCCESS) {
4130 set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4131 } else {
4132 set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4133 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4134 drbd_set_st_err_str(retcode), retcode);
4135 }
4136 wake_up(&mdev->state_wait);
4137
4138 return TRUE;
4139 }
4140
4141 static int got_Ping(struct drbd_conf *mdev, struct p_header *h)
4142 {
4143 return drbd_send_ping_ack(mdev);
4144
4145 }
4146
4147 static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
4148 {
4149 /* restore idle timeout */
4150 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4151 if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4152 wake_up(&mdev->misc_wait);
4153
4154 return TRUE;
4155 }
4156
4157 static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
4158 {
4159 struct p_block_ack *p = (struct p_block_ack *)h;
4160 sector_t sector = be64_to_cpu(p->sector);
4161 int blksize = be32_to_cpu(p->blksize);
4162
4163 D_ASSERT(mdev->agreed_pro_version >= 89);
4164
4165 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4166
4167 drbd_rs_complete_io(mdev, sector);
4168 drbd_set_in_sync(mdev, sector, blksize);
4169 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4170 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4171 dec_rs_pending(mdev);
4172
4173 return TRUE;
4174 }
4175
4176 /* when we receive the ACK for a write request,
4177 * verify that we actually know about it */
4178 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4179 u64 id, sector_t sector)
4180 {
4181 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4182 struct hlist_node *n;
4183 struct drbd_request *req;
4184
4185 hlist_for_each_entry(req, n, slot, colision) {
4186 if ((unsigned long)req == (unsigned long)id) {
4187 if (req->sector != sector) {
4188 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4189 "wrong sector (%llus versus %llus)\n", req,
4190 (unsigned long long)req->sector,
4191 (unsigned long long)sector);
4192 break;
4193 }
4194 return req;
4195 }
4196 }
4197 dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
4198 (void *)(unsigned long)id, (unsigned long long)sector);
4199 return NULL;
4200 }
4201
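/*
 * Common helper for the ack handlers below: a validator function
 * (_ack_id_to_req for writes tracked in tl_hash, _ar_id_to_req for
 * application reads) maps block_id/sector back to the request, and the
 * given event is fed into the request state machine under req_lock.
 */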
4202 typedef struct drbd_request *(req_validator_fn)
4203 (struct drbd_conf *mdev, u64 id, sector_t sector);
4204
4205 static int validate_req_change_req_state(struct drbd_conf *mdev,
4206 u64 id, sector_t sector, req_validator_fn validator,
4207 const char *func, enum drbd_req_event what)
4208 {
4209 struct drbd_request *req;
4210 struct bio_and_error m;
4211
4212 spin_lock_irq(&mdev->req_lock);
4213 req = validator(mdev, id, sector);
4214 if (unlikely(!req)) {
4215 spin_unlock_irq(&mdev->req_lock);
4216 dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
4217 return FALSE;
4218 }
4219 __req_mod(req, what, &m);
4220 spin_unlock_irq(&mdev->req_lock);
4221
4222 if (m.bio)
4223 complete_master_bio(mdev, &m);
4224 return TRUE;
4225 }
4226
4227 static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
4228 {
4229 struct p_block_ack *p = (struct p_block_ack *)h;
4230 sector_t sector = be64_to_cpu(p->sector);
4231 int blksize = be32_to_cpu(p->blksize);
4232 enum drbd_req_event what;
4233
4234 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4235
4236 if (is_syncer_block_id(p->block_id)) {
4237 drbd_set_in_sync(mdev, sector, blksize);
4238 dec_rs_pending(mdev);
4239 return TRUE;
4240 }
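	/* Not a resync block: this acks an application write.  Which ack
	 * type the peer sends depends on the wire protocol (DRBD_PROT_B
	 * vs. DRBD_PROT_C, see the asserts), so map it to the matching
	 * request state machine event. */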
4241 switch (be16_to_cpu(h->command)) {
4242 case P_RS_WRITE_ACK:
4243 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4244 what = write_acked_by_peer_and_sis;
4245 break;
4246 case P_WRITE_ACK:
4247 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4248 what = write_acked_by_peer;
4249 break;
4250 case P_RECV_ACK:
4251 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4252 what = recv_acked_by_peer;
4253 break;
4254 case P_DISCARD_ACK:
4255 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4256 what = conflict_discarded_by_peer;
4257 break;
4258 default:
4259 D_ASSERT(0);
4260 return FALSE;
4261 }
4262
4263 return validate_req_change_req_state(mdev, p->block_id, sector,
4264 _ack_id_to_req, __func__ , what);
4265 }
4266
4267 static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
4268 {
4269 struct p_block_ack *p = (struct p_block_ack *)h;
4270 sector_t sector = be64_to_cpu(p->sector);
4271
4272 if (__ratelimit(&drbd_ratelimit_state))
4273 dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");
4274
4275 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4276
4277 if (is_syncer_block_id(p->block_id)) {
4278 int size = be32_to_cpu(p->blksize);
4279 dec_rs_pending(mdev);
4280 drbd_rs_failed_io(mdev, sector, size);
4281 return TRUE;
4282 }
4283 return validate_req_change_req_state(mdev, p->block_id, sector,
4284 _ack_id_to_req, __func__ , neg_acked);
4285 }
4286
4287 static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
4288 {
4289 struct p_block_ack *p = (struct p_block_ack *)h;
4290 sector_t sector = be64_to_cpu(p->sector);
4291
4292 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4293 dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4294 (unsigned long long)sector, be32_to_cpu(p->blksize));
4295
4296 return validate_req_change_req_state(mdev, p->block_id, sector,
4297 _ar_id_to_req, __func__ , neg_acked);
4298 }
4299
4300 static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
4301 {
4302 sector_t sector;
4303 int size;
4304 struct p_block_ack *p = (struct p_block_ack *)h;
4305
4306 sector = be64_to_cpu(p->sector);
4307 size = be32_to_cpu(p->blksize);
4308
4309 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4310
4311 dec_rs_pending(mdev);
4312
4313 if (get_ldev_if_state(mdev, D_FAILED)) {
4314 drbd_rs_complete_io(mdev, sector);
4315 drbd_rs_failed_io(mdev, sector, size);
4316 put_ldev(mdev);
4317 }
4318
4319 return TRUE;
4320 }
4321
4322 static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
4323 {
4324 struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4325
4326 tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4327
4328 return TRUE;
4329 }
4330
4331 static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
4332 {
4333 struct p_block_ack *p = (struct p_block_ack *)h;
4334 struct drbd_work *w;
4335 sector_t sector;
4336 int size;
4337
4338 sector = be64_to_cpu(p->sector);
4339 size = be32_to_cpu(p->blksize);
4340
4341 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4342
4343 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4344 drbd_ov_oos_found(mdev, sector, size);
4345 else
4346 ov_oos_print(mdev);
4347
4348 drbd_rs_complete_io(mdev, sector);
4349 dec_rs_pending(mdev);
4350
4351 if (--mdev->ov_left == 0) {
4352 w = kmalloc(sizeof(*w), GFP_NOIO);
4353 if (w) {
4354 w->cb = w_ov_finished;
4355 drbd_queue_work_front(&mdev->data.work, w);
4356 } else {
4357 dev_err(DEV, "kmalloc(w) failed.\n");
4358 ov_oos_print(mdev);
4359 drbd_resync_finished(mdev);
4360 }
4361 }
4362 return TRUE;
4363 }
4364
4365 static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h)
4366 {
4367 /* IGNORE */
4368 return TRUE;
4369 }
4370
4371 struct asender_cmd {
4372 size_t pkt_size;
4373 int (*process)(struct drbd_conf *mdev, struct p_header *h);
4374 };
4375
4376 static struct asender_cmd *get_asender_cmd(int cmd)
4377 {
4378 static struct asender_cmd asender_tbl[] = {
4379 /* anything missing from this table is in
4380 * the drbd_cmd_handler (drbd_default_handler) table,
4381 * see the beginning of drbdd() */
4382 [P_PING] = { sizeof(struct p_header), got_Ping },
4383 [P_PING_ACK] = { sizeof(struct p_header), got_PingAck },
4384 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4385 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4386 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4387 [P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4388 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4389 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4390 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4391 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4392 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4393 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4394 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
4395 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_something_to_ignore_m },
4396 [P_MAX_CMD] = { 0, NULL },
4397 };
4398 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4399 return NULL;
4400 return &asender_tbl[cmd];
4401 }
4402
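/*
 * The asender owns the meta socket: it sends pings and the acks for
 * completed epoch entries, and receives the small fixed-size packets
 * listed in asender_tbl.  Reception is incremental: first a full
 * p_header, then, once the command is known, the remaining
 * cmd->pkt_size - sizeof(struct p_header) payload bytes.
 */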
4403 int drbd_asender(struct drbd_thread *thi)
4404 {
4405 struct drbd_conf *mdev = thi->mdev;
4406 struct p_header *h = &mdev->meta.rbuf.header;
4407 struct asender_cmd *cmd = NULL;
4408
4409 int rv, len;
4410 void *buf = h;
4411 int received = 0;
4412 int expect = sizeof(struct p_header);
4413 int empty;
4414
4415 sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4416
4417 current->policy = SCHED_RR; /* Make this a realtime task! */
4418 current->rt_priority = 2; /* more important than all other tasks */
4419
4420 while (get_t_state(thi) == Running) {
4421 drbd_thread_current_set_cpu(mdev);
4422 if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4423 ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4424 mdev->meta.socket->sk->sk_rcvtimeo =
4425 mdev->net_conf->ping_timeo*HZ/10;
4426 }
4427
4428 /* conditionally cork;
4429 * it may hurt latency if we cork without much to send */
4430 if (!mdev->net_conf->no_cork &&
4431 3 < atomic_read(&mdev->unacked_cnt))
4432 drbd_tcp_cork(mdev->meta.socket);
4433 while (1) {
4434 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4435 flush_signals(current);
4436 if (!drbd_process_done_ee(mdev)) {
4437 dev_err(DEV, "process_done_ee() = NOT_OK\n");
4438 goto reconnect;
4439 }
4440 /* to avoid race with newly queued ACKs */
4441 set_bit(SIGNAL_ASENDER, &mdev->flags);
4442 spin_lock_irq(&mdev->req_lock);
4443 empty = list_empty(&mdev->done_ee);
4444 spin_unlock_irq(&mdev->req_lock);
4445 /* new ack may have been queued right here,
4446 * but then there is also a signal pending,
4447 * and we start over... */
4448 if (empty)
4449 break;
4450 }
4451 /* but unconditionally uncork unless disabled */
4452 if (!mdev->net_conf->no_cork)
4453 drbd_tcp_uncork(mdev->meta.socket);
4454
4455 /* short circuit, recv_msg would return EINTR anyways. */
4456 if (signal_pending(current))
4457 continue;
4458
4459 rv = drbd_recv_short(mdev, mdev->meta.socket,
4460 buf, expect-received, 0);
4461 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4462
4463 flush_signals(current);
4464
4465 /* Note:
4466 * -EINTR (on meta) we got a signal
4467 * -EAGAIN (on meta) rcvtimeo expired
4468 * -ECONNRESET other side closed the connection
4469 * -ERESTARTSYS (on data) we got a signal
4470 * rv < 0 other than above: unexpected error!
4471 * rv == expected: full header or command
4472 * rv < expected: "woken" by signal during receive
4473 * rv == 0 : "connection shut down by peer"
4474 */
4475 if (likely(rv > 0)) {
4476 received += rv;
4477 buf += rv;
4478 } else if (rv == 0) {
4479 dev_err(DEV, "meta connection shut down by peer.\n");
4480 goto reconnect;
4481 } else if (rv == -EAGAIN) {
4482 if (mdev->meta.socket->sk->sk_rcvtimeo ==
4483 mdev->net_conf->ping_timeo*HZ/10) {
4484 dev_err(DEV, "PingAck did not arrive in time.\n");
4485 goto reconnect;
4486 }
4487 set_bit(SEND_PING, &mdev->flags);
4488 continue;
4489 } else if (rv == -EINTR) {
4490 continue;
4491 } else {
4492 dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4493 goto reconnect;
4494 }
4495
4496 if (received == expect && cmd == NULL) {
4497 if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4498 dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
4499 (long)be32_to_cpu(h->magic),
4500 h->command, h->length);
4501 goto reconnect;
4502 }
4503 cmd = get_asender_cmd(be16_to_cpu(h->command));
4504 len = be16_to_cpu(h->length);
4505 if (unlikely(cmd == NULL)) {
4506 dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
4507 (long)be32_to_cpu(h->magic),
4508 h->command, h->length);
4509 goto disconnect;
4510 }
4511 expect = cmd->pkt_size;
4512 ERR_IF(len != expect-sizeof(struct p_header))
4513 goto reconnect;
4514 }
4515 if (received == expect) {
4516 D_ASSERT(cmd != NULL);
4517 if (!cmd->process(mdev, h))
4518 goto reconnect;
4519
4520 buf = h;
4521 received = 0;
4522 expect = sizeof(struct p_header);
4523 cmd = NULL;
4524 }
4525 }
4526
4527 if (0) {
4528 reconnect:
4529 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4530 }
4531 if (0) {
4532 disconnect:
4533 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4534 }
4535 clear_bit(SIGNAL_ASENDER, &mdev->flags);
4536
4537 D_ASSERT(mdev->state.conn < C_CONNECTED);
4538 dev_info(DEV, "asender terminated\n");
4539
4540 return 0;
4541 }