drbd: Introduce "peer_device" object between "device" and "connection"
drivers/block/drbd/drbd_receiver.c
1 /*
2 drbd_receiver.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25
26 #include <linux/module.h>
27
28 #include <asm/uaccess.h>
29 #include <net/sock.h>
30
31 #include <linux/drbd.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/in.h>
35 #include <linux/mm.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/pkt_sched.h>
40 #define __KERNEL_SYSCALLS__
41 #include <linux/unistd.h>
42 #include <linux/vmalloc.h>
43 #include <linux/random.h>
44 #include <linux/string.h>
45 #include <linux/scatterlist.h>
46 #include "drbd_int.h"
47 #include "drbd_protocol.h"
48 #include "drbd_req.h"
49
50 #include "drbd_vli.h"
51
52 struct packet_info {
53 enum drbd_packet cmd;
54 unsigned int size;
55 unsigned int vnr;
56 void *data;
57 };
58
59 enum finish_epoch {
60 FE_STILL_LIVE,
61 FE_DESTROYED,
62 FE_RECYCLED,
63 };
64
65 static int drbd_do_features(struct drbd_connection *connection);
66 static int drbd_do_auth(struct drbd_connection *connection);
67 static int drbd_disconnected(struct drbd_device *device);
68
69 static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event);
70 static int e_end_block(struct drbd_work *, int);
71
72
73 #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
74
75 /*
76 * some helper functions to deal with singly linked page lists,
77 * page->private being our "next" pointer.
78 */
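/*
 * Illustrative sketch: walking such a chain needs nothing beyond the
 * page->private convention above and the page_chain_next() helper from
 * drbd_int.h.  The function name below is hypothetical.
 */
#if 0
static unsigned int page_chain_count(struct page *page)
{
	unsigned int n = 0;

	/* the last page of a chain has page->private == 0, so
	 * page_chain_next() returns NULL and ends the walk */
	while (page) {
		n++;
		page = page_chain_next(page);
	}
	return n;
}
#endif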
79
80 /* If at least n pages are linked at head, get n pages off.
81 * Otherwise, don't modify head, and return NULL.
82 * Locking is the responsibility of the caller.
83 */
84 static struct page *page_chain_del(struct page **head, int n)
85 {
86 struct page *page;
87 struct page *tmp;
88
89 BUG_ON(!n);
90 BUG_ON(!head);
91
92 page = *head;
93
94 if (!page)
95 return NULL;
96
97 while (page) {
98 tmp = page_chain_next(page);
99 if (--n == 0)
100 break; /* found sufficient pages */
101 if (tmp == NULL)
102 /* insufficient pages, don't use any of them. */
103 return NULL;
104 page = tmp;
105 }
106
107 /* add end of list marker for the returned list */
108 set_page_private(page, 0);
109 /* actual return value, and adjustment of head */
110 page = *head;
111 *head = tmp;
112 return page;
113 }
114
115 /* may be used outside of locks to find the tail of a (usually short)
116 * "private" page chain, before adding it back to a global chain head
117 * with page_chain_add() under a spinlock. */
118 static struct page *page_chain_tail(struct page *page, int *len)
119 {
120 struct page *tmp;
121 int i = 1;
122 while ((tmp = page_chain_next(page)))
123 ++i, page = tmp;
124 if (len)
125 *len = i;
126 return page;
127 }
128
129 static int page_chain_free(struct page *page)
130 {
131 struct page *tmp;
132 int i = 0;
133 page_chain_for_each_safe(page, tmp) {
134 put_page(page);
135 ++i;
136 }
137 return i;
138 }
139
140 static void page_chain_add(struct page **head,
141 struct page *chain_first, struct page *chain_last)
142 {
143 #if 1
144 struct page *tmp;
145 tmp = page_chain_tail(chain_first, NULL);
146 BUG_ON(tmp != chain_last);
147 #endif
148
149 /* add chain to head */
150 set_page_private(chain_last, (unsigned long)*head);
151 *head = chain_first;
152 }
153
154 static struct page *__drbd_alloc_pages(struct drbd_device *device,
155 unsigned int number)
156 {
157 struct page *page = NULL;
158 struct page *tmp = NULL;
159 unsigned int i = 0;
160
161 /* Yes, testing drbd_pp_vacant outside the lock is racy.
162 * So what. It saves a spin_lock. */
163 if (drbd_pp_vacant >= number) {
164 spin_lock(&drbd_pp_lock);
165 page = page_chain_del(&drbd_pp_pool, number);
166 if (page)
167 drbd_pp_vacant -= number;
168 spin_unlock(&drbd_pp_lock);
169 if (page)
170 return page;
171 }
172
173 /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
174 * "criss-cross" setup, that might cause write-out on some other DRBD,
175 * which in turn might block on the other node at this very place. */
176 for (i = 0; i < number; i++) {
177 tmp = alloc_page(GFP_TRY);
178 if (!tmp)
179 break;
180 set_page_private(tmp, (unsigned long)page);
181 page = tmp;
182 }
183
184 if (i == number)
185 return page;
186
187 /* Not enough pages immediately available this time.
188 * No need to jump around here, drbd_alloc_pages will retry this
189 * function "soon". */
190 if (page) {
191 tmp = page_chain_tail(page, NULL);
192 spin_lock(&drbd_pp_lock);
193 page_chain_add(&drbd_pp_pool, page, tmp);
194 drbd_pp_vacant += i;
195 spin_unlock(&drbd_pp_lock);
196 }
197 return NULL;
198 }
199
200 static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
201 struct list_head *to_be_freed)
202 {
203 struct drbd_peer_request *peer_req;
204 struct list_head *le, *tle;
205
206 /* The EEs are always appended to the end of the list. Since
207 they are sent in order over the wire, they have to finish
208 in order. As soon as we see the first one that has not finished, we can
209 stop examining the list... */
210
211 list_for_each_safe(le, tle, &device->net_ee) {
212 peer_req = list_entry(le, struct drbd_peer_request, w.list);
213 if (drbd_peer_req_has_active_page(peer_req))
214 break;
215 list_move(le, to_be_freed);
216 }
217 }
218
219 static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device)
220 {
221 LIST_HEAD(reclaimed);
222 struct drbd_peer_request *peer_req, *t;
223
224 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
225 reclaim_finished_net_peer_reqs(device, &reclaimed);
226 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
227
228 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
229 drbd_free_net_peer_req(device, peer_req);
230 }
231
232 /**
233 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
234 * @device: DRBD device.
235 * @number: number of pages requested
236 * @retry: whether to retry, if not enough pages are available right now
237 *
238 * Tries to allocate @number pages, first from our own page pool, then from
239 * the kernel, unless this allocation would exceed the max_buffers setting.
240 * Possibly retry until DRBD frees sufficient pages somewhere else.
241 *
242 * Returns a page chain linked via page->private.
243 */
244 struct page *drbd_alloc_pages(struct drbd_device *device, unsigned int number,
245 bool retry)
246 {
247 struct page *page = NULL;
248 struct net_conf *nc;
249 DEFINE_WAIT(wait);
250 int mxb;
251
252 /* Yes, we may run up to @number over max_buffers. If we
253 * follow it strictly, the admin will get it wrong anyways. */
254 rcu_read_lock();
255 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
256 mxb = nc ? nc->max_buffers : 1000000;
257 rcu_read_unlock();
258
259 if (atomic_read(&device->pp_in_use) < mxb)
260 page = __drbd_alloc_pages(device, number);
261
262 while (page == NULL) {
263 prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
264
265 drbd_kick_lo_and_reclaim_net(device);
266
267 if (atomic_read(&device->pp_in_use) < mxb) {
268 page = __drbd_alloc_pages(device, number);
269 if (page)
270 break;
271 }
272
273 if (!retry)
274 break;
275
276 if (signal_pending(current)) {
277 dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
278 break;
279 }
280
281 schedule();
282 }
283 finish_wait(&drbd_pp_wait, &wait);
284
285 if (page)
286 atomic_add(number, &device->pp_in_use);
287 return page;
288 }
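/*
 * Minimal usage sketch, for illustration only: allocate a one-page chain and
 * hand it straight back.  Real callers attach the chain to a peer request
 * instead (see drbd_alloc_peer_req() below); the function name is
 * hypothetical.
 */
#if 0
static void pp_alloc_example(struct drbd_device *device)
{
	/* retry == true: sleep until the pool or max_buffers permits it */
	struct page *page = drbd_alloc_pages(device, 1, true);

	if (!page)
		return;	/* only if we were interrupted by a signal */
	/* ... receive payload data into the chain ... */
	drbd_free_pages(device, page, 0);	/* 0: accounted in pp_in_use */
}
#endif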
289
290 /* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
291 * Is also used from inside another spin_lock_irq(&first_peer_device(device)->connection->req_lock);
292 * Either links the page chain back to the global pool,
293 * or returns all pages to the system. */
294 static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
295 {
296 atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
297 int i;
298
299 if (page == NULL)
300 return;
301
302 if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
303 i = page_chain_free(page);
304 else {
305 struct page *tmp;
306 tmp = page_chain_tail(page, &i);
307 spin_lock(&drbd_pp_lock);
308 page_chain_add(&drbd_pp_pool, page, tmp);
309 drbd_pp_vacant += i;
310 spin_unlock(&drbd_pp_lock);
311 }
312 i = atomic_sub_return(i, a);
313 if (i < 0)
314 dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
315 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
316 wake_up(&drbd_pp_wait);
317 }
318
319 /*
320 You need to hold the req_lock:
321 _drbd_wait_ee_list_empty()
322
323 You must not have the req_lock:
324 drbd_free_peer_req()
325 drbd_alloc_peer_req()
326 drbd_free_peer_reqs()
327 drbd_ee_fix_bhs()
328 drbd_finish_peer_reqs()
329 drbd_clear_done_ee()
330 drbd_wait_ee_list_empty()
331 */
332
333 struct drbd_peer_request *
334 drbd_alloc_peer_req(struct drbd_device *device, u64 id, sector_t sector,
335 unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
336 {
337 struct drbd_peer_request *peer_req;
338 struct page *page = NULL;
339 unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
340
341 if (drbd_insert_fault(device, DRBD_FAULT_AL_EE))
342 return NULL;
343
344 peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
345 if (!peer_req) {
346 if (!(gfp_mask & __GFP_NOWARN))
347 dev_err(DEV, "%s: allocation failed\n", __func__);
348 return NULL;
349 }
350
351 if (data_size) {
352 page = drbd_alloc_pages(device, nr_pages, (gfp_mask & __GFP_WAIT));
353 if (!page)
354 goto fail;
355 }
356
357 drbd_clear_interval(&peer_req->i);
358 peer_req->i.size = data_size;
359 peer_req->i.sector = sector;
360 peer_req->i.local = false;
361 peer_req->i.waiting = false;
362
363 peer_req->epoch = NULL;
364 peer_req->w.device = device;
365 peer_req->pages = page;
366 atomic_set(&peer_req->pending_bios, 0);
367 peer_req->flags = 0;
368 /*
369 * The block_id is opaque to the receiver. It is not endianness
370 * converted, and sent back to the sender unchanged.
371 */
372 peer_req->block_id = id;
373
374 return peer_req;
375
376 fail:
377 mempool_free(peer_req, drbd_ee_mempool);
378 return NULL;
379 }
380
381 void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
382 int is_net)
383 {
384 if (peer_req->flags & EE_HAS_DIGEST)
385 kfree(peer_req->digest);
386 drbd_free_pages(device, peer_req->pages, is_net);
387 D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
388 D_ASSERT(drbd_interval_empty(&peer_req->i));
389 mempool_free(peer_req, drbd_ee_mempool);
390 }
391
392 int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
393 {
394 LIST_HEAD(work_list);
395 struct drbd_peer_request *peer_req, *t;
396 int count = 0;
397 int is_net = list == &device->net_ee;
398
399 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
400 list_splice_init(list, &work_list);
401 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
402
403 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
404 __drbd_free_peer_req(device, peer_req, is_net);
405 count++;
406 }
407 return count;
408 }
409
410 /*
411 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
412 */
413 static int drbd_finish_peer_reqs(struct drbd_device *device)
414 {
415 LIST_HEAD(work_list);
416 LIST_HEAD(reclaimed);
417 struct drbd_peer_request *peer_req, *t;
418 int err = 0;
419
420 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
421 reclaim_finished_net_peer_reqs(device, &reclaimed);
422 list_splice_init(&device->done_ee, &work_list);
423 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
424
425 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
426 drbd_free_net_peer_req(device, peer_req);
427
428 /* possible callbacks here:
429 * e_end_block, and e_end_resync_block, e_send_superseded.
430 * all ignore the last argument.
431 */
432 list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
433 int err2;
434
435 /* list_del not necessary, next/prev members not touched */
436 err2 = peer_req->w.cb(&peer_req->w, !!err);
437 if (!err)
438 err = err2;
439 drbd_free_peer_req(device, peer_req);
440 }
441 wake_up(&device->ee_wait);
442
443 return err;
444 }
445
446 static void _drbd_wait_ee_list_empty(struct drbd_device *device,
447 struct list_head *head)
448 {
449 DEFINE_WAIT(wait);
450
451 /* avoids spin_lock/unlock
452 * and calling prepare_to_wait in the fast path */
453 while (!list_empty(head)) {
454 prepare_to_wait(&device->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
455 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
456 io_schedule();
457 finish_wait(&device->ee_wait, &wait);
458 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
459 }
460 }
461
462 static void drbd_wait_ee_list_empty(struct drbd_device *device,
463 struct list_head *head)
464 {
465 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
466 _drbd_wait_ee_list_empty(device, head);
467 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
468 }
469
470 static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
471 {
472 mm_segment_t oldfs;
473 struct kvec iov = {
474 .iov_base = buf,
475 .iov_len = size,
476 };
477 struct msghdr msg = {
478 .msg_iovlen = 1,
479 .msg_iov = (struct iovec *)&iov,
480 .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
481 };
482 int rv;
483
484 oldfs = get_fs();
485 set_fs(KERNEL_DS);
486 rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
487 set_fs(oldfs);
488
489 return rv;
490 }
491
492 static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
493 {
494 int rv;
495
496 rv = drbd_recv_short(connection->data.socket, buf, size, 0);
497
498 if (rv < 0) {
499 if (rv == -ECONNRESET)
500 conn_info(connection, "sock was reset by peer\n");
501 else if (rv != -ERESTARTSYS)
502 conn_err(connection, "sock_recvmsg returned %d\n", rv);
503 } else if (rv == 0) {
504 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
505 long t;
506 rcu_read_lock();
507 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
508 rcu_read_unlock();
509
510 t = wait_event_timeout(connection->ping_wait, connection->cstate < C_WF_REPORT_PARAMS, t);
511
512 if (t)
513 goto out;
514 }
515 conn_info(connection, "sock was shut down by peer\n");
516 }
517
518 if (rv != size)
519 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
520
521 out:
522 return rv;
523 }
524
525 static int drbd_recv_all(struct drbd_connection *connection, void *buf, size_t size)
526 {
527 int err;
528
529 err = drbd_recv(connection, buf, size);
530 if (err != size) {
531 if (err >= 0)
532 err = -EIO;
533 } else
534 err = 0;
535 return err;
536 }
537
538 static int drbd_recv_all_warn(struct drbd_connection *connection, void *buf, size_t size)
539 {
540 int err;
541
542 err = drbd_recv_all(connection, buf, size);
543 if (err && !signal_pending(current))
544 conn_warn(connection, "short read (expected size %d)\n", (int)size);
545 return err;
546 }
547
548 /* quoting tcp(7):
549 * On individual connections, the socket buffer size must be set prior to the
550 * listen(2) or connect(2) calls in order to have it take effect.
551 * This is our wrapper to do so.
552 */
553 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
554 unsigned int rcv)
555 {
556 /* open coded SO_SNDBUF, SO_RCVBUF */
557 if (snd) {
558 sock->sk->sk_sndbuf = snd;
559 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
560 }
561 if (rcv) {
562 sock->sk->sk_rcvbuf = rcv;
563 sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
564 }
565 }
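/*
 * The open-coded assignments above are the in-kernel counterpart of what a
 * user space program would do with setsockopt() before connect()/listen().
 * Illustrative only; "fd" and "peer" are hypothetical.
 */
#if 0
	int snd = 128 * 1024, rcv = 128 * 1024;	/* example buffer sizes */

	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
	/* only after that: */
	connect(fd, (struct sockaddr *)&peer, sizeof(peer));
#endif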
566
567 static struct socket *drbd_try_connect(struct drbd_connection *connection)
568 {
569 const char *what;
570 struct socket *sock;
571 struct sockaddr_in6 src_in6;
572 struct sockaddr_in6 peer_in6;
573 struct net_conf *nc;
574 int err, peer_addr_len, my_addr_len;
575 int sndbuf_size, rcvbuf_size, connect_int;
576 int disconnect_on_error = 1;
577
578 rcu_read_lock();
579 nc = rcu_dereference(connection->net_conf);
580 if (!nc) {
581 rcu_read_unlock();
582 return NULL;
583 }
584 sndbuf_size = nc->sndbuf_size;
585 rcvbuf_size = nc->rcvbuf_size;
586 connect_int = nc->connect_int;
587 rcu_read_unlock();
588
589 my_addr_len = min_t(int, connection->my_addr_len, sizeof(src_in6));
590 memcpy(&src_in6, &connection->my_addr, my_addr_len);
591
592 if (((struct sockaddr *)&connection->my_addr)->sa_family == AF_INET6)
593 src_in6.sin6_port = 0;
594 else
595 ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
596
597 peer_addr_len = min_t(int, connection->peer_addr_len, sizeof(src_in6));
598 memcpy(&peer_in6, &connection->peer_addr, peer_addr_len);
599
600 what = "sock_create_kern";
601 err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
602 SOCK_STREAM, IPPROTO_TCP, &sock);
603 if (err < 0) {
604 sock = NULL;
605 goto out;
606 }
607
608 sock->sk->sk_rcvtimeo =
609 sock->sk->sk_sndtimeo = connect_int * HZ;
610 drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
611
612 /* explicitly bind to the configured IP as source IP
613 * for the outgoing connections.
614 * This is needed for multihomed hosts and to be
615 * able to use lo: interfaces for drbd.
616 * Make sure to use 0 as port number, so linux selects
617 * a free one dynamically.
618 */
619 what = "bind before connect";
620 err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
621 if (err < 0)
622 goto out;
623
624 /* connect may fail, peer not yet available.
625 * stay C_WF_CONNECTION, don't go Disconnecting! */
626 disconnect_on_error = 0;
627 what = "connect";
628 err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
629
630 out:
631 if (err < 0) {
632 if (sock) {
633 sock_release(sock);
634 sock = NULL;
635 }
636 switch (-err) {
637 /* timeout, busy, signal pending */
638 case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
639 case EINTR: case ERESTARTSYS:
640 /* peer not (yet) available, network problem */
641 case ECONNREFUSED: case ENETUNREACH:
642 case EHOSTDOWN: case EHOSTUNREACH:
643 disconnect_on_error = 0;
644 break;
645 default:
646 conn_err(connection, "%s failed, err = %d\n", what, err);
647 }
648 if (disconnect_on_error)
649 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
650 }
651
652 return sock;
653 }
654
655 struct accept_wait_data {
656 struct drbd_connection *connection;
657 struct socket *s_listen;
658 struct completion door_bell;
659 void (*original_sk_state_change)(struct sock *sk);
660
661 };
662
663 static void drbd_incoming_connection(struct sock *sk)
664 {
665 struct accept_wait_data *ad = sk->sk_user_data;
666 void (*state_change)(struct sock *sk);
667
668 state_change = ad->original_sk_state_change;
669 if (sk->sk_state == TCP_ESTABLISHED)
670 complete(&ad->door_bell);
671 state_change(sk);
672 }
673
674 static int prepare_listen_socket(struct drbd_connection *connection, struct accept_wait_data *ad)
675 {
676 int err, sndbuf_size, rcvbuf_size, my_addr_len;
677 struct sockaddr_in6 my_addr;
678 struct socket *s_listen;
679 struct net_conf *nc;
680 const char *what;
681
682 rcu_read_lock();
683 nc = rcu_dereference(connection->net_conf);
684 if (!nc) {
685 rcu_read_unlock();
686 return -EIO;
687 }
688 sndbuf_size = nc->sndbuf_size;
689 rcvbuf_size = nc->rcvbuf_size;
690 rcu_read_unlock();
691
692 my_addr_len = min_t(int, connection->my_addr_len, sizeof(struct sockaddr_in6));
693 memcpy(&my_addr, &connection->my_addr, my_addr_len);
694
695 what = "sock_create_kern";
696 err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
697 SOCK_STREAM, IPPROTO_TCP, &s_listen);
698 if (err) {
699 s_listen = NULL;
700 goto out;
701 }
702
703 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
704 drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
705
706 what = "bind before listen";
707 err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
708 if (err < 0)
709 goto out;
710
711 ad->s_listen = s_listen;
712 write_lock_bh(&s_listen->sk->sk_callback_lock);
713 ad->original_sk_state_change = s_listen->sk->sk_state_change;
714 s_listen->sk->sk_state_change = drbd_incoming_connection;
715 s_listen->sk->sk_user_data = ad;
716 write_unlock_bh(&s_listen->sk->sk_callback_lock);
717
718 what = "listen";
719 err = s_listen->ops->listen(s_listen, 5);
720 if (err < 0)
721 goto out;
722
723 return 0;
724 out:
725 if (s_listen)
726 sock_release(s_listen);
727 if (err < 0) {
728 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
729 conn_err(connection, "%s failed, err = %d\n", what, err);
730 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
731 }
732 }
733
734 return -EIO;
735 }
736
737 static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
738 {
739 write_lock_bh(&sk->sk_callback_lock);
740 sk->sk_state_change = ad->original_sk_state_change;
741 sk->sk_user_data = NULL;
742 write_unlock_bh(&sk->sk_callback_lock);
743 }
744
745 static struct socket *drbd_wait_for_connect(struct drbd_connection *connection, struct accept_wait_data *ad)
746 {
747 int timeo, connect_int, err = 0;
748 struct socket *s_estab = NULL;
749 struct net_conf *nc;
750
751 rcu_read_lock();
752 nc = rcu_dereference(connection->net_conf);
753 if (!nc) {
754 rcu_read_unlock();
755 return NULL;
756 }
757 connect_int = nc->connect_int;
758 rcu_read_unlock();
759
760 timeo = connect_int * HZ;
761 /* +/- timeo/7, i.e. about 28.5% total random jitter */
762 timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
763
764 err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
765 if (err <= 0)
766 return NULL;
767
768 err = kernel_accept(ad->s_listen, &s_estab, 0);
769 if (err < 0) {
770 if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
771 conn_err(connection, "accept failed, err = %d\n", err);
772 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
773 }
774 }
775
776 if (s_estab)
777 unregister_state_change(s_estab->sk, ad);
778
779 return s_estab;
780 }
781
782 static int decode_header(struct drbd_connection *, void *, struct packet_info *);
783
784 static int send_first_packet(struct drbd_connection *connection, struct drbd_socket *sock,
785 enum drbd_packet cmd)
786 {
787 if (!conn_prepare_command(connection, sock))
788 return -EIO;
789 return conn_send_command(connection, sock, cmd, 0, NULL, 0);
790 }
791
792 static int receive_first_packet(struct drbd_connection *connection, struct socket *sock)
793 {
794 unsigned int header_size = drbd_header_size(connection);
795 struct packet_info pi;
796 int err;
797
798 err = drbd_recv_short(sock, connection->data.rbuf, header_size, 0);
799 if (err != header_size) {
800 if (err >= 0)
801 err = -EIO;
802 return err;
803 }
804 err = decode_header(connection, connection->data.rbuf, &pi);
805 if (err)
806 return err;
807 return pi.cmd;
808 }
809
810 /**
811 * drbd_socket_okay() - Free the socket if its connection is not okay
812 * @sock: pointer to the pointer to the socket.
813 */
814 static int drbd_socket_okay(struct socket **sock)
815 {
816 int rr;
817 char tb[4];
818
819 if (!*sock)
820 return false;
821
822 rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
823
824 if (rr > 0 || rr == -EAGAIN) {
825 return true;
826 } else {
827 sock_release(*sock);
828 *sock = NULL;
829 return false;
830 }
831 }
832 /* Gets called if a connection is established, or if a new minor gets created
833 in a connection */
834 int drbd_connected(struct drbd_device *device)
835 {
836 int err;
837
838 atomic_set(&device->packet_seq, 0);
839 device->peer_seq = 0;
840
841 device->state_mutex = first_peer_device(device)->connection->agreed_pro_version < 100 ?
842 &first_peer_device(device)->connection->cstate_mutex :
843 &device->own_state_mutex;
844
845 err = drbd_send_sync_param(device);
846 if (!err)
847 err = drbd_send_sizes(device, 0, 0);
848 if (!err)
849 err = drbd_send_uuids(device);
850 if (!err)
851 err = drbd_send_current_state(device);
852 clear_bit(USE_DEGR_WFC_T, &device->flags);
853 clear_bit(RESIZE_PENDING, &device->flags);
854 atomic_set(&device->ap_in_flight, 0);
855 mod_timer(&device->request_timer, jiffies + HZ); /* just start it here. */
856 return err;
857 }
858
859 /*
860 * return values:
861 * 1 yes, we have a valid connection
862 * 0 oops, did not work out, please try again
863 * -1 peer talks different language,
864 * no point in trying again, please go standalone.
865 * -2 We do not have a network config...
866 */
867 static int conn_connect(struct drbd_connection *connection)
868 {
869 struct drbd_socket sock, msock;
870 struct drbd_device *device;
871 struct net_conf *nc;
872 int vnr, timeout, h, ok;
873 bool discard_my_data;
874 enum drbd_state_rv rv;
875 struct accept_wait_data ad = {
876 .connection = connection,
877 .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
878 };
879
880 clear_bit(DISCONNECT_SENT, &connection->flags);
881 if (conn_request_state(connection, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
882 return -2;
883
884 mutex_init(&sock.mutex);
885 sock.sbuf = connection->data.sbuf;
886 sock.rbuf = connection->data.rbuf;
887 sock.socket = NULL;
888 mutex_init(&msock.mutex);
889 msock.sbuf = connection->meta.sbuf;
890 msock.rbuf = connection->meta.rbuf;
891 msock.socket = NULL;
892
893 /* Assume that the peer only understands protocol 80 until we know better. */
894 connection->agreed_pro_version = 80;
895
896 if (prepare_listen_socket(connection, &ad))
897 return 0;
898
899 do {
900 struct socket *s;
901
902 s = drbd_try_connect(connection);
903 if (s) {
904 if (!sock.socket) {
905 sock.socket = s;
906 send_first_packet(connection, &sock, P_INITIAL_DATA);
907 } else if (!msock.socket) {
908 clear_bit(RESOLVE_CONFLICTS, &connection->flags);
909 msock.socket = s;
910 send_first_packet(connection, &msock, P_INITIAL_META);
911 } else {
912 conn_err(connection, "Logic error in conn_connect()\n");
913 goto out_release_sockets;
914 }
915 }
916
917 if (sock.socket && msock.socket) {
918 rcu_read_lock();
919 nc = rcu_dereference(connection->net_conf);
920 timeout = nc->ping_timeo * HZ / 10;
921 rcu_read_unlock();
922 schedule_timeout_interruptible(timeout);
923 ok = drbd_socket_okay(&sock.socket);
924 ok = drbd_socket_okay(&msock.socket) && ok;
925 if (ok)
926 break;
927 }
928
929 retry:
930 s = drbd_wait_for_connect(connection, &ad);
931 if (s) {
932 int fp = receive_first_packet(connection, s);
933 drbd_socket_okay(&sock.socket);
934 drbd_socket_okay(&msock.socket);
935 switch (fp) {
936 case P_INITIAL_DATA:
937 if (sock.socket) {
938 conn_warn(connection, "initial packet S crossed\n");
939 sock_release(sock.socket);
940 sock.socket = s;
941 goto randomize;
942 }
943 sock.socket = s;
944 break;
945 case P_INITIAL_META:
946 set_bit(RESOLVE_CONFLICTS, &connection->flags);
947 if (msock.socket) {
948 conn_warn(connection, "initial packet M crossed\n");
949 sock_release(msock.socket);
950 msock.socket = s;
951 goto randomize;
952 }
953 msock.socket = s;
954 break;
955 default:
956 conn_warn(connection, "Error receiving initial packet\n");
957 sock_release(s);
958 randomize:
959 if (prandom_u32() & 1)
960 goto retry;
961 }
962 }
963
964 if (connection->cstate <= C_DISCONNECTING)
965 goto out_release_sockets;
966 if (signal_pending(current)) {
967 flush_signals(current);
968 smp_rmb();
969 if (get_t_state(&connection->receiver) == EXITING)
970 goto out_release_sockets;
971 }
972
973 ok = drbd_socket_okay(&sock.socket);
974 ok = drbd_socket_okay(&msock.socket) && ok;
975 } while (!ok);
976
977 if (ad.s_listen)
978 sock_release(ad.s_listen);
979
980 sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
981 msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
982
983 sock.socket->sk->sk_allocation = GFP_NOIO;
984 msock.socket->sk->sk_allocation = GFP_NOIO;
985
986 sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
987 msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
988
989 /* NOT YET ...
990 * sock.socket->sk->sk_sndtimeo = connection->net_conf->timeout*HZ/10;
991 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
992 * first set it to the P_CONNECTION_FEATURES timeout,
993 * which we set to 4x the configured ping_timeout. */
994 rcu_read_lock();
995 nc = rcu_dereference(connection->net_conf);
996
997 sock.socket->sk->sk_sndtimeo =
998 sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
999
1000 msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
1001 timeout = nc->timeout * HZ / 10;
1002 discard_my_data = nc->discard_my_data;
1003 rcu_read_unlock();
1004
1005 msock.socket->sk->sk_sndtimeo = timeout;
1006
1007 /* we don't want delays.
1008 * we use TCP_CORK where appropriate, though */
1009 drbd_tcp_nodelay(sock.socket);
1010 drbd_tcp_nodelay(msock.socket);
1011
1012 connection->data.socket = sock.socket;
1013 connection->meta.socket = msock.socket;
1014 connection->last_received = jiffies;
1015
1016 h = drbd_do_features(connection);
1017 if (h <= 0)
1018 return h;
1019
1020 if (connection->cram_hmac_tfm) {
1021 /* drbd_request_state(device, NS(conn, WFAuth)); */
1022 switch (drbd_do_auth(connection)) {
1023 case -1:
1024 conn_err(connection, "Authentication of peer failed\n");
1025 return -1;
1026 case 0:
1027 conn_err(connection, "Authentication of peer failed, trying again.\n");
1028 return 0;
1029 }
1030 }
1031
1032 connection->data.socket->sk->sk_sndtimeo = timeout;
1033 connection->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1034
1035 if (drbd_send_protocol(connection) == -EOPNOTSUPP)
1036 return -1;
1037
1038 set_bit(STATE_SENT, &connection->flags);
1039
1040 rcu_read_lock();
1041 idr_for_each_entry(&connection->volumes, device, vnr) {
1042 kref_get(&device->kref);
1043 rcu_read_unlock();
1044
1045 /* Prevent a race between resync-handshake and
1046 * being promoted to Primary.
1047 *
1048 * Grab and release the state mutex, so we know that any current
1049 * drbd_set_role() is finished, and any incoming drbd_set_role
1050 * will see the STATE_SENT flag, and wait for it to be cleared.
1051 */
1052 mutex_lock(device->state_mutex);
1053 mutex_unlock(device->state_mutex);
1054
1055 if (discard_my_data)
1056 set_bit(DISCARD_MY_DATA, &device->flags);
1057 else
1058 clear_bit(DISCARD_MY_DATA, &device->flags);
1059
1060 drbd_connected(device);
1061 kref_put(&device->kref, &drbd_minor_destroy);
1062 rcu_read_lock();
1063 }
1064 rcu_read_unlock();
1065
1066 rv = conn_request_state(connection, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
1067 if (rv < SS_SUCCESS || connection->cstate != C_WF_REPORT_PARAMS) {
1068 clear_bit(STATE_SENT, &connection->flags);
1069 return 0;
1070 }
1071
1072 drbd_thread_start(&connection->asender);
1073
1074 mutex_lock(&connection->conf_update);
1075 /* The discard_my_data flag is a single-shot modifier to the next
1076 * connection attempt, the handshake of which is now well underway.
1077 * No need for rcu style copying of the whole struct
1078 * just to clear a single value. */
1079 connection->net_conf->discard_my_data = 0;
1080 mutex_unlock(&connection->conf_update);
1081
1082 return h;
1083
1084 out_release_sockets:
1085 if (ad.s_listen)
1086 sock_release(ad.s_listen);
1087 if (sock.socket)
1088 sock_release(sock.socket);
1089 if (msock.socket)
1090 sock_release(msock.socket);
1091 return -1;
1092 }
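/*
 * Sketch of how a caller is expected to act on the return codes documented
 * above conn_connect(); illustrative only, the real loop lives in the
 * receiver thread.
 */
#if 0
	int h;

	do {
		h = conn_connect(connection);	/* 0: did not work out, retry */
	} while (h == 0);

	if (h < 0) {
		/* -1: peer talks a different language, -2: no net config;
		 * in both cases there is no point in retrying. */
	}
#endif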
1093
1094 static int decode_header(struct drbd_connection *connection, void *header, struct packet_info *pi)
1095 {
1096 unsigned int header_size = drbd_header_size(connection);
1097
1098 if (header_size == sizeof(struct p_header100) &&
1099 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1100 struct p_header100 *h = header;
1101 if (h->pad != 0) {
1102 conn_err(connection, "Header padding is not zero\n");
1103 return -EINVAL;
1104 }
1105 pi->vnr = be16_to_cpu(h->volume);
1106 pi->cmd = be16_to_cpu(h->command);
1107 pi->size = be32_to_cpu(h->length);
1108 } else if (header_size == sizeof(struct p_header95) &&
1109 *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
1110 struct p_header95 *h = header;
1111 pi->cmd = be16_to_cpu(h->command);
1112 pi->size = be32_to_cpu(h->length);
1113 pi->vnr = 0;
1114 } else if (header_size == sizeof(struct p_header80) &&
1115 *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1116 struct p_header80 *h = header;
1117 pi->cmd = be16_to_cpu(h->command);
1118 pi->size = be16_to_cpu(h->length);
1119 pi->vnr = 0;
1120 } else {
1121 conn_err(connection, "Wrong magic value 0x%08x in protocol version %d\n",
1122 be32_to_cpu(*(__be32 *)header),
1123 connection->agreed_pro_version);
1124 return -EINVAL;
1125 }
1126 pi->data = header + header_size;
1127 return 0;
1128 }
1129
1130 static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
1131 {
1132 void *buffer = connection->data.rbuf;
1133 int err;
1134
1135 err = drbd_recv_all_warn(connection, buffer, drbd_header_size(connection));
1136 if (err)
1137 return err;
1138
1139 err = decode_header(connection, buffer, pi);
1140 connection->last_received = jiffies;
1141
1142 return err;
1143 }
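/*
 * Minimal sketch of how the decoded packet_info is typically consumed;
 * illustrative only, the real per-command dispatch follows further down in
 * this file.
 */
#if 0
	struct packet_info pi;

	if (drbd_recv_header(connection, &pi))
		return;
	/* pi.cmd selects the handler, pi.vnr the volume, pi.size is the
	 * payload that still has to be read from the data socket, and
	 * pi.data points right behind the header in the receive buffer. */
#endif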
1144
1145 static void drbd_flush(struct drbd_connection *connection)
1146 {
1147 int rv;
1148 struct drbd_device *device;
1149 int vnr;
1150
1151 if (connection->write_ordering >= WO_bdev_flush) {
1152 rcu_read_lock();
1153 idr_for_each_entry(&connection->volumes, device, vnr) {
1154 if (!get_ldev(device))
1155 continue;
1156 kref_get(&device->kref);
1157 rcu_read_unlock();
1158
1159 rv = blkdev_issue_flush(device->ldev->backing_bdev,
1160 GFP_NOIO, NULL);
1161 if (rv) {
1162 dev_info(DEV, "local disk flush failed with status %d\n", rv);
1163 /* would rather check on EOPNOTSUPP, but that is not reliable.
1164 * don't try again for ANY return value != 0
1165 * if (rv == -EOPNOTSUPP) */
1166 drbd_bump_write_ordering(connection, WO_drain_io);
1167 }
1168 put_ldev(device);
1169 kref_put(&device->kref, &drbd_minor_destroy);
1170
1171 rcu_read_lock();
1172 if (rv)
1173 break;
1174 }
1175 rcu_read_unlock();
1176 }
1177 }
1178
1179 /**
1180 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
1181 * @device: DRBD device.
1182 * @epoch: Epoch object.
1183 * @ev: Epoch event.
1184 */
1185 static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *connection,
1186 struct drbd_epoch *epoch,
1187 enum epoch_event ev)
1188 {
1189 int epoch_size;
1190 struct drbd_epoch *next_epoch;
1191 enum finish_epoch rv = FE_STILL_LIVE;
1192
1193 spin_lock(&connection->epoch_lock);
1194 do {
1195 next_epoch = NULL;
1196
1197 epoch_size = atomic_read(&epoch->epoch_size);
1198
1199 switch (ev & ~EV_CLEANUP) {
1200 case EV_PUT:
1201 atomic_dec(&epoch->active);
1202 break;
1203 case EV_GOT_BARRIER_NR:
1204 set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1205 break;
1206 case EV_BECAME_LAST:
1207 /* nothing to do */
1208 break;
1209 }
1210
1211 if (epoch_size != 0 &&
1212 atomic_read(&epoch->active) == 0 &&
1213 (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
1214 if (!(ev & EV_CLEANUP)) {
1215 spin_unlock(&connection->epoch_lock);
1216 drbd_send_b_ack(epoch->connection, epoch->barrier_nr, epoch_size);
1217 spin_lock(&connection->epoch_lock);
1218 }
1219 #if 0
1220 /* FIXME: dec unacked on connection, once we have
1221 * something to count pending connection packets in. */
1222 if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
1223 dec_unacked(epoch->connection);
1224 #endif
1225
1226 if (connection->current_epoch != epoch) {
1227 next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1228 list_del(&epoch->list);
1229 ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1230 connection->epochs--;
1231 kfree(epoch);
1232
1233 if (rv == FE_STILL_LIVE)
1234 rv = FE_DESTROYED;
1235 } else {
1236 epoch->flags = 0;
1237 atomic_set(&epoch->epoch_size, 0);
1238 /* atomic_set(&epoch->active, 0); is already zero */
1239 if (rv == FE_STILL_LIVE)
1240 rv = FE_RECYCLED;
1241 }
1242 }
1243
1244 if (!next_epoch)
1245 break;
1246
1247 epoch = next_epoch;
1248 } while (1);
1249
1250 spin_unlock(&connection->epoch_lock);
1251
1252 return rv;
1253 }
1254
1255 /**
1256 * drbd_bump_write_ordering() - Fall back to another write ordering method
1257 * @connection: DRBD connection.
1258 * @wo: Write ordering method to try.
1259 */
1260 void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo)
1261 {
1262 struct disk_conf *dc;
1263 struct drbd_device *device;
1264 enum write_ordering_e pwo;
1265 int vnr;
1266 static char *write_ordering_str[] = {
1267 [WO_none] = "none",
1268 [WO_drain_io] = "drain",
1269 [WO_bdev_flush] = "flush",
1270 };
1271
1272 pwo = connection->write_ordering;
1273 wo = min(pwo, wo);
1274 rcu_read_lock();
1275 idr_for_each_entry(&connection->volumes, device, vnr) {
1276 if (!get_ldev_if_state(device, D_ATTACHING))
1277 continue;
1278 dc = rcu_dereference(device->ldev->disk_conf);
1279
1280 if (wo == WO_bdev_flush && !dc->disk_flushes)
1281 wo = WO_drain_io;
1282 if (wo == WO_drain_io && !dc->disk_drain)
1283 wo = WO_none;
1284 put_ldev(device);
1285 }
1286 rcu_read_unlock();
1287 connection->write_ordering = wo;
1288 if (pwo != connection->write_ordering || wo == WO_bdev_flush)
1289 conn_info(connection, "Method to ensure write ordering: %s\n", write_ordering_str[connection->write_ordering]);
1290 }
1291
1292 /**
1293 * drbd_submit_peer_request()
1294 * @device: DRBD device.
1295 * @peer_req: peer request
1296 * @rw: flag field, see bio->bi_rw
1297 *
1298 * May spread the pages to multiple bios,
1299 * depending on bio_add_page restrictions.
1300 *
1301 * Returns 0 if all bios have been submitted,
1302 * -ENOMEM if we could not allocate enough bios,
1303 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1304 * single page to an empty bio (which should never happen and likely indicates
1305 * that the lower level IO stack is in some way broken). This has been observed
1306 * on certain Xen deployments.
1307 */
1308 /* TODO allocate from our own bio_set. */
1309 int drbd_submit_peer_request(struct drbd_device *device,
1310 struct drbd_peer_request *peer_req,
1311 const unsigned rw, const int fault_type)
1312 {
1313 struct bio *bios = NULL;
1314 struct bio *bio;
1315 struct page *page = peer_req->pages;
1316 sector_t sector = peer_req->i.sector;
1317 unsigned ds = peer_req->i.size;
1318 unsigned n_bios = 0;
1319 unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
1320 int err = -ENOMEM;
1321
1322 /* In most cases, we will only need one bio. But in case the lower
1323 * level restrictions happen to be different at this offset on this
1324 * side than those of the sending peer, we may need to submit the
1325 * request in more than one bio.
1326 *
1327 * Plain bio_alloc is good enough here, this is no DRBD internally
1328 * generated bio, but a bio allocated on behalf of the peer.
1329 */
1330 next_bio:
1331 bio = bio_alloc(GFP_NOIO, nr_pages);
1332 if (!bio) {
1333 dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1334 goto fail;
1335 }
1336 /* > peer_req->i.sector, unless this is the first bio */
1337 bio->bi_iter.bi_sector = sector;
1338 bio->bi_bdev = device->ldev->backing_bdev;
1339 bio->bi_rw = rw;
1340 bio->bi_private = peer_req;
1341 bio->bi_end_io = drbd_peer_request_endio;
1342
1343 bio->bi_next = bios;
1344 bios = bio;
1345 ++n_bios;
1346
1347 page_chain_for_each(page) {
1348 unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1349 if (!bio_add_page(bio, page, len, 0)) {
1350 /* A single page must always be possible!
1351 * But in case it fails anyways,
1352 * we deal with it, and complain (below). */
1353 if (bio->bi_vcnt == 0) {
1354 dev_err(DEV,
1355 "bio_add_page failed for len=%u, "
1356 "bi_vcnt=0 (bi_sector=%llu)\n",
1357 len, (uint64_t)bio->bi_iter.bi_sector);
1358 err = -ENOSPC;
1359 goto fail;
1360 }
1361 goto next_bio;
1362 }
1363 ds -= len;
1364 sector += len >> 9;
1365 --nr_pages;
1366 }
1367 D_ASSERT(page == NULL);
1368 D_ASSERT(ds == 0);
1369
1370 atomic_set(&peer_req->pending_bios, n_bios);
1371 do {
1372 bio = bios;
1373 bios = bios->bi_next;
1374 bio->bi_next = NULL;
1375
1376 drbd_generic_make_request(device, fault_type, bio);
1377 } while (bios);
1378 return 0;
1379
1380 fail:
1381 while (bios) {
1382 bio = bios;
1383 bios = bios->bi_next;
1384 bio_put(bio);
1385 }
1386 return err;
1387 }
1388
1389 static void drbd_remove_epoch_entry_interval(struct drbd_device *device,
1390 struct drbd_peer_request *peer_req)
1391 {
1392 struct drbd_interval *i = &peer_req->i;
1393
1394 drbd_remove_interval(&device->write_requests, i);
1395 drbd_clear_interval(i);
1396
1397 /* Wake up any processes waiting for this peer request to complete. */
1398 if (i->waiting)
1399 wake_up(&device->misc_wait);
1400 }
1401
1402 static void conn_wait_active_ee_empty(struct drbd_connection *connection)
1403 {
1404 struct drbd_device *device;
1405 int vnr;
1406
1407 rcu_read_lock();
1408 idr_for_each_entry(&connection->volumes, device, vnr) {
1409 kref_get(&device->kref);
1410 rcu_read_unlock();
1411 drbd_wait_ee_list_empty(device, &device->active_ee);
1412 kref_put(&device->kref, &drbd_minor_destroy);
1413 rcu_read_lock();
1414 }
1415 rcu_read_unlock();
1416 }
1417
1418 static int receive_Barrier(struct drbd_connection *connection, struct packet_info *pi)
1419 {
1420 int rv;
1421 struct p_barrier *p = pi->data;
1422 struct drbd_epoch *epoch;
1423
1424 /* FIXME these are unacked on connection,
1425 * not a specific (peer)device.
1426 */
1427 connection->current_epoch->barrier_nr = p->barrier;
1428 connection->current_epoch->connection = connection;
1429 rv = drbd_may_finish_epoch(connection, connection->current_epoch, EV_GOT_BARRIER_NR);
1430
1431 /* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1432 * the activity log, which means it would not be resynced in case the
1433 * R_PRIMARY crashes now.
1434 * Therefore we must send the barrier_ack after the barrier request was
1435 * completed. */
1436 switch (connection->write_ordering) {
1437 case WO_none:
1438 if (rv == FE_RECYCLED)
1439 return 0;
1440
1441 /* receiver context, in the writeout path of the other node.
1442 * avoid potential distributed deadlock */
1443 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1444 if (epoch)
1445 break;
1446 else
1447 conn_warn(connection, "Allocation of an epoch failed, slowing down\n");
1448 /* Fall through */
1449
1450 case WO_bdev_flush:
1451 case WO_drain_io:
1452 conn_wait_active_ee_empty(connection);
1453 drbd_flush(connection);
1454
1455 if (atomic_read(&connection->current_epoch->epoch_size)) {
1456 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1457 if (epoch)
1458 break;
1459 }
1460
1461 return 0;
1462 default:
1463 conn_err(connection, "Strangeness in connection->write_ordering %d\n", connection->write_ordering);
1464 return -EIO;
1465 }
1466
1467 epoch->flags = 0;
1468 atomic_set(&epoch->epoch_size, 0);
1469 atomic_set(&epoch->active, 0);
1470
1471 spin_lock(&connection->epoch_lock);
1472 if (atomic_read(&connection->current_epoch->epoch_size)) {
1473 list_add(&epoch->list, &connection->current_epoch->list);
1474 connection->current_epoch = epoch;
1475 connection->epochs++;
1476 } else {
1477 /* The current_epoch got recycled while we allocated this one... */
1478 kfree(epoch);
1479 }
1480 spin_unlock(&connection->epoch_lock);
1481
1482 return 0;
1483 }
1484
1485 /* used from receive_RSDataReply (recv_resync_read)
1486 * and from receive_Data */
1487 static struct drbd_peer_request *
1488 read_in_block(struct drbd_device *device, u64 id, sector_t sector,
1489 int data_size) __must_hold(local)
1490 {
1491 const sector_t capacity = drbd_get_capacity(device->this_bdev);
1492 struct drbd_peer_request *peer_req;
1493 struct page *page;
1494 int dgs, ds, err;
1495 void *dig_in = first_peer_device(device)->connection->int_dig_in;
1496 void *dig_vv = first_peer_device(device)->connection->int_dig_vv;
1497 unsigned long *data;
1498
1499 dgs = 0;
1500 if (first_peer_device(device)->connection->peer_integrity_tfm) {
1501 dgs = crypto_hash_digestsize(first_peer_device(device)->connection->peer_integrity_tfm);
1502 /*
1503 * FIXME: Receive the incoming digest into the receive buffer
1504 * here, together with its struct p_data?
1505 */
1506 err = drbd_recv_all_warn(first_peer_device(device)->connection, dig_in, dgs);
1507 if (err)
1508 return NULL;
1509 data_size -= dgs;
1510 }
1511
1512 if (!expect(IS_ALIGNED(data_size, 512)))
1513 return NULL;
1514 if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1515 return NULL;
1516
1517 /* even though we trust our peer,
1518 * we sometimes have to double check. */
1519 if (sector + (data_size>>9) > capacity) {
1520 dev_err(DEV, "request from peer beyond end of local disk: "
1521 "capacity: %llus < sector: %llus + size: %u\n",
1522 (unsigned long long)capacity,
1523 (unsigned long long)sector, data_size);
1524 return NULL;
1525 }
1526
1527 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1528 * "criss-cross" setup, that might cause write-out on some other DRBD,
1529 * which in turn might block on the other node at this very place. */
1530 peer_req = drbd_alloc_peer_req(device, id, sector, data_size, GFP_NOIO);
1531 if (!peer_req)
1532 return NULL;
1533
1534 if (!data_size)
1535 return peer_req;
1536
1537 ds = data_size;
1538 page = peer_req->pages;
1539 page_chain_for_each(page) {
1540 unsigned len = min_t(int, ds, PAGE_SIZE);
1541 data = kmap(page);
1542 err = drbd_recv_all_warn(first_peer_device(device)->connection, data, len);
1543 if (drbd_insert_fault(device, DRBD_FAULT_RECEIVE)) {
1544 dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1545 data[0] = data[0] ^ (unsigned long)-1;
1546 }
1547 kunmap(page);
1548 if (err) {
1549 drbd_free_peer_req(device, peer_req);
1550 return NULL;
1551 }
1552 ds -= len;
1553 }
1554
1555 if (dgs) {
1556 drbd_csum_ee(device, first_peer_device(device)->connection->peer_integrity_tfm, peer_req, dig_vv);
1557 if (memcmp(dig_in, dig_vv, dgs)) {
1558 dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1559 (unsigned long long)sector, data_size);
1560 drbd_free_peer_req(device, peer_req);
1561 return NULL;
1562 }
1563 }
1564 device->recv_cnt += data_size>>9;
1565 return peer_req;
1566 }
1567
1568 /* drbd_drain_block() just takes a data block
1569 * out of the socket input buffer, and discards it.
1570 */
1571 static int drbd_drain_block(struct drbd_device *device, int data_size)
1572 {
1573 struct page *page;
1574 int err = 0;
1575 void *data;
1576
1577 if (!data_size)
1578 return 0;
1579
1580 page = drbd_alloc_pages(device, 1, 1);
1581
1582 data = kmap(page);
1583 while (data_size) {
1584 unsigned int len = min_t(int, data_size, PAGE_SIZE);
1585
1586 err = drbd_recv_all_warn(first_peer_device(device)->connection, data, len);
1587 if (err)
1588 break;
1589 data_size -= len;
1590 }
1591 kunmap(page);
1592 drbd_free_pages(device, page, 0);
1593 return err;
1594 }
1595
1596 static int recv_dless_read(struct drbd_device *device, struct drbd_request *req,
1597 sector_t sector, int data_size)
1598 {
1599 struct bio_vec bvec;
1600 struct bvec_iter iter;
1601 struct bio *bio;
1602 int dgs, err, expect;
1603 void *dig_in = first_peer_device(device)->connection->int_dig_in;
1604 void *dig_vv = first_peer_device(device)->connection->int_dig_vv;
1605
1606 dgs = 0;
1607 if (first_peer_device(device)->connection->peer_integrity_tfm) {
1608 dgs = crypto_hash_digestsize(first_peer_device(device)->connection->peer_integrity_tfm);
1609 err = drbd_recv_all_warn(first_peer_device(device)->connection, dig_in, dgs);
1610 if (err)
1611 return err;
1612 data_size -= dgs;
1613 }
1614
1615 /* optimistically update recv_cnt. if receiving fails below,
1616 * we disconnect anyways, and counters will be reset. */
1617 device->recv_cnt += data_size>>9;
1618
1619 bio = req->master_bio;
1620 D_ASSERT(sector == bio->bi_iter.bi_sector);
1621
1622 bio_for_each_segment(bvec, bio, iter) {
1623 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1624 expect = min_t(int, data_size, bvec.bv_len);
1625 err = drbd_recv_all_warn(first_peer_device(device)->connection, mapped, expect);
1626 kunmap(bvec.bv_page);
1627 if (err)
1628 return err;
1629 data_size -= expect;
1630 }
1631
1632 if (dgs) {
1633 drbd_csum_bio(device, first_peer_device(device)->connection->peer_integrity_tfm, bio, dig_vv);
1634 if (memcmp(dig_in, dig_vv, dgs)) {
1635 dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1636 return -EINVAL;
1637 }
1638 }
1639
1640 D_ASSERT(data_size == 0);
1641 return 0;
1642 }
1643
1644 /*
1645 * e_end_resync_block() is called in asender context via
1646 * drbd_finish_peer_reqs().
1647 */
1648 static int e_end_resync_block(struct drbd_work *w, int unused)
1649 {
1650 struct drbd_peer_request *peer_req =
1651 container_of(w, struct drbd_peer_request, w);
1652 struct drbd_device *device = w->device;
1653 sector_t sector = peer_req->i.sector;
1654 int err;
1655
1656 D_ASSERT(drbd_interval_empty(&peer_req->i));
1657
1658 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1659 drbd_set_in_sync(device, sector, peer_req->i.size);
1660 err = drbd_send_ack(device, P_RS_WRITE_ACK, peer_req);
1661 } else {
1662 /* Record failure to sync */
1663 drbd_rs_failed_io(device, sector, peer_req->i.size);
1664
1665 err = drbd_send_ack(device, P_NEG_ACK, peer_req);
1666 }
1667 dec_unacked(device);
1668
1669 return err;
1670 }
1671
1672 static int recv_resync_read(struct drbd_device *device, sector_t sector, int data_size) __releases(local)
1673 {
1674 struct drbd_peer_request *peer_req;
1675
1676 peer_req = read_in_block(device, ID_SYNCER, sector, data_size);
1677 if (!peer_req)
1678 goto fail;
1679
1680 dec_rs_pending(device);
1681
1682 inc_unacked(device);
1683 /* corresponding dec_unacked() in e_end_resync_block()
1684 * respective _drbd_clear_done_ee */
1685
1686 peer_req->w.cb = e_end_resync_block;
1687
1688 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1689 list_add(&peer_req->w.list, &device->sync_ee);
1690 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1691
1692 atomic_add(data_size >> 9, &device->rs_sect_ev);
1693 if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
1694 return 0;
1695
1696 /* don't care for the reason here */
1697 dev_err(DEV, "submit failed, triggering re-connect\n");
1698 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1699 list_del(&peer_req->w.list);
1700 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1701
1702 drbd_free_peer_req(device, peer_req);
1703 fail:
1704 put_ldev(device);
1705 return -EIO;
1706 }
1707
1708 static struct drbd_request *
1709 find_request(struct drbd_device *device, struct rb_root *root, u64 id,
1710 sector_t sector, bool missing_ok, const char *func)
1711 {
1712 struct drbd_request *req;
1713
1714 /* Request object according to our peer */
1715 req = (struct drbd_request *)(unsigned long)id;
1716 if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
1717 return req;
1718 if (!missing_ok) {
1719 dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
1720 (unsigned long)id, (unsigned long long)sector);
1721 }
1722 return NULL;
1723 }
1724
1725 static int receive_DataReply(struct drbd_connection *connection, struct packet_info *pi)
1726 {
1727 struct drbd_device *device;
1728 struct drbd_request *req;
1729 sector_t sector;
1730 int err;
1731 struct p_data *p = pi->data;
1732
1733 device = vnr_to_device(connection, pi->vnr);
1734 if (!device)
1735 return -EIO;
1736
1737 sector = be64_to_cpu(p->sector);
1738
1739 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1740 req = find_request(device, &device->read_requests, p->block_id, sector, false, __func__);
1741 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1742 if (unlikely(!req))
1743 return -EIO;
1744
1745 /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
1746 * special casing it there for the various failure cases.
1747 * still no race with drbd_fail_pending_reads */
1748 err = recv_dless_read(device, req, sector, pi->size);
1749 if (!err)
1750 req_mod(req, DATA_RECEIVED);
1751 /* else: nothing. handled from drbd_disconnect...
1752 * I don't think we may complete this just yet
1753 * in case we are "on-disconnect: freeze" */
1754
1755 return err;
1756 }
1757
1758 static int receive_RSDataReply(struct drbd_connection *connection, struct packet_info *pi)
1759 {
1760 struct drbd_device *device;
1761 sector_t sector;
1762 int err;
1763 struct p_data *p = pi->data;
1764
1765 device = vnr_to_device(connection, pi->vnr);
1766 if (!device)
1767 return -EIO;
1768
1769 sector = be64_to_cpu(p->sector);
1770 D_ASSERT(p->block_id == ID_SYNCER);
1771
1772 if (get_ldev(device)) {
1773 /* data is submitted to disk within recv_resync_read.
1774 * corresponding put_ldev done below on error,
1775 * or in drbd_peer_request_endio. */
1776 err = recv_resync_read(device, sector, pi->size);
1777 } else {
1778 if (__ratelimit(&drbd_ratelimit_state))
1779 dev_err(DEV, "Can not write resync data to local disk.\n");
1780
1781 err = drbd_drain_block(device, pi->size);
1782
1783 drbd_send_ack_dp(device, P_NEG_ACK, p, pi->size);
1784 }
1785
1786 atomic_add(pi->size >> 9, &device->rs_sect_in);
1787
1788 return err;
1789 }
1790
1791 static void restart_conflicting_writes(struct drbd_device *device,
1792 sector_t sector, int size)
1793 {
1794 struct drbd_interval *i;
1795 struct drbd_request *req;
1796
1797 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
1798 if (!i->local)
1799 continue;
1800 req = container_of(i, struct drbd_request, i);
1801 if (req->rq_state & RQ_LOCAL_PENDING ||
1802 !(req->rq_state & RQ_POSTPONED))
1803 continue;
1804 /* as it is RQ_POSTPONED, this will cause it to
1805 * be queued on the retry workqueue. */
1806 __req_mod(req, CONFLICT_RESOLVED, NULL);
1807 }
1808 }
1809
1810 /*
1811 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
1812 */
1813 static int e_end_block(struct drbd_work *w, int cancel)
1814 {
1815 struct drbd_peer_request *peer_req =
1816 container_of(w, struct drbd_peer_request, w);
1817 struct drbd_device *device = w->device;
1818 sector_t sector = peer_req->i.sector;
1819 int err = 0, pcmd;
1820
1821 if (peer_req->flags & EE_SEND_WRITE_ACK) {
1822 if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1823 pcmd = (device->state.conn >= C_SYNC_SOURCE &&
1824 device->state.conn <= C_PAUSED_SYNC_T &&
1825 peer_req->flags & EE_MAY_SET_IN_SYNC) ?
1826 P_RS_WRITE_ACK : P_WRITE_ACK;
1827 err = drbd_send_ack(device, pcmd, peer_req);
1828 if (pcmd == P_RS_WRITE_ACK)
1829 drbd_set_in_sync(device, sector, peer_req->i.size);
1830 } else {
1831 err = drbd_send_ack(device, P_NEG_ACK, peer_req);
1832 /* we expect it to be marked out of sync anyways...
1833 * maybe assert this? */
1834 }
1835 dec_unacked(device);
1836 }
1837 /* we delete from the conflict detection hash _after_ we sent out the
1838 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
1839 if (peer_req->flags & EE_IN_INTERVAL_TREE) {
1840 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1841 D_ASSERT(!drbd_interval_empty(&peer_req->i));
1842 drbd_remove_epoch_entry_interval(device, peer_req);
1843 if (peer_req->flags & EE_RESTART_REQUESTS)
1844 restart_conflicting_writes(device, sector, peer_req->i.size);
1845 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1846 } else
1847 D_ASSERT(drbd_interval_empty(&peer_req->i));
1848
1849 drbd_may_finish_epoch(first_peer_device(device)->connection, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1850
1851 return err;
1852 }
1853
1854 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
1855 {
1856 struct drbd_device *device = w->device;
1857 struct drbd_peer_request *peer_req =
1858 container_of(w, struct drbd_peer_request, w);
1859 int err;
1860
1861 err = drbd_send_ack(device, ack, peer_req);
1862 dec_unacked(device);
1863
1864 return err;
1865 }
1866
1867 static int e_send_superseded(struct drbd_work *w, int unused)
1868 {
1869 return e_send_ack(w, P_SUPERSEDED);
1870 }
1871
1872 static int e_send_retry_write(struct drbd_work *w, int unused)
1873 {
1874 struct drbd_connection *connection = first_peer_device(w->device)->connection;
1875
1876 return e_send_ack(w, connection->agreed_pro_version >= 100 ?
1877 P_RETRY_WRITE : P_SUPERSEDED);
1878 }
1879
1880 static bool seq_greater(u32 a, u32 b)
1881 {
1882 /*
1883 * We assume 32-bit wrap-around here.
1884 * For 24-bit wrap-around, we would have to shift:
1885 * a <<= 8; b <<= 8;
1886 */
1887 return (s32)a - (s32)b > 0;
1888 }
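
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): a worked
 * example of the signed-difference trick above, across the 32-bit wrap.
 */
static inline bool seq_greater_wrap_example(void)
{
	u32 just_wrapped = 5;		/* 5 increments after the wrap  */
	u32 before_wrap = 0xfffffffb;	/* 5 increments before the wrap */

	/* (s32)5 - (s32)0xfffffffb == 5 - (-5) == 10 > 0, so the freshly
	 * wrapped value still counts as the newer one. */
	return seq_greater(just_wrapped, before_wrap);	/* true */
}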
1889
1890 static u32 seq_max(u32 a, u32 b)
1891 {
1892 return seq_greater(a, b) ? a : b;
1893 }
1894
1895 static void update_peer_seq(struct drbd_device *device, unsigned int peer_seq)
1896 {
1897 unsigned int newest_peer_seq;
1898
1899 if (test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags)) {
1900 spin_lock(&device->peer_seq_lock);
1901 newest_peer_seq = seq_max(device->peer_seq, peer_seq);
1902 device->peer_seq = newest_peer_seq;
1903 spin_unlock(&device->peer_seq_lock);
1904 /* wake up only if we actually changed device->peer_seq */
1905 if (peer_seq == newest_peer_seq)
1906 wake_up(&device->seq_wait);
1907 }
1908 }
1909
1910 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
1911 {
1912 return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1913 }
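
/*
 * Illustrative note: overlaps() mixes units, s1/s2 are sector numbers while
 * l1/l2 are byte lengths (hence the >>9).  A 4 KiB request at sector 0 spans
 * sectors 0..7, so overlaps(0, 4096, 8, 4096) is false, while
 * overlaps(0, 4096, 4, 4096) is true.
 */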
1914
1915 /* maybe change sync_ee into interval trees as well? */
1916 static bool overlapping_resync_write(struct drbd_device *device, struct drbd_peer_request *peer_req)
1917 {
1918 struct drbd_peer_request *rs_req;
1919 	bool rv = false;
1920
1921 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
1922 list_for_each_entry(rs_req, &device->sync_ee, w.list) {
1923 if (overlaps(peer_req->i.sector, peer_req->i.size,
1924 rs_req->i.sector, rs_req->i.size)) {
1925 			rv = true;
1926 break;
1927 }
1928 }
1929 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
1930
1931 return rv;
1932 }
1933
1934 /* Called from receive_Data.
1935 * Synchronize packets on sock with packets on msock.
1936 *
1937 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1938 * packet traveling on msock, they are still processed in the order they have
1939 * been sent.
1940 *
1941  * Note: we don't care about Ack packets overtaking P_DATA packets.
1942  *
1943  * In case packet_seq is larger than device->peer_seq, there are still
1944  * outstanding packets on the msock; we wait for them to arrive.
1945  * In case this is the logically next packet, we update device->peer_seq
1946  * ourselves. Correctly handles 32bit wrap around.
1947 *
1948 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1949 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1950 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1951  * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1952 *
1953 * returns 0 if we may process the packet,
1954 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1955 static int wait_for_and_update_peer_seq(struct drbd_device *device, const u32 peer_seq)
1956 {
1957 DEFINE_WAIT(wait);
1958 long timeout;
1959 int ret = 0, tp;
1960
1961 if (!test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags))
1962 return 0;
1963
1964 spin_lock(&device->peer_seq_lock);
1965 for (;;) {
1966 if (!seq_greater(peer_seq - 1, device->peer_seq)) {
1967 device->peer_seq = seq_max(device->peer_seq, peer_seq);
1968 break;
1969 }
1970
1971 if (signal_pending(current)) {
1972 ret = -ERESTARTSYS;
1973 break;
1974 }
1975
1976 rcu_read_lock();
1977 tp = rcu_dereference(first_peer_device(device)->connection->net_conf)->two_primaries;
1978 rcu_read_unlock();
1979
1980 if (!tp)
1981 break;
1982
1983 /* Only need to wait if two_primaries is enabled */
1984 prepare_to_wait(&device->seq_wait, &wait, TASK_INTERRUPTIBLE);
1985 spin_unlock(&device->peer_seq_lock);
1986 rcu_read_lock();
1987 timeout = rcu_dereference(first_peer_device(device)->connection->net_conf)->ping_timeo*HZ/10;
1988 rcu_read_unlock();
1989 timeout = schedule_timeout(timeout);
1990 spin_lock(&device->peer_seq_lock);
1991 if (!timeout) {
1992 ret = -ETIMEDOUT;
1993 dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
1994 break;
1995 }
1996 }
1997 spin_unlock(&device->peer_seq_lock);
1998 finish_wait(&device->seq_wait, &wait);
1999 return ret;
2000 }
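
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the loop
 * condition in wait_for_and_update_peer_seq() above boils down to "is this
 * packet at most one sequence number ahead of the highest one seen so far?",
 * i.e. is it the logically next packet.
 */
static inline bool is_logically_next(u32 packet_seq, u32 highest_seen)
{
	/* packet_seq - 1 not greater than highest_seen
	 * <=> packet_seq <= highest_seen + 1, modulo 2^32 */
	return !seq_greater(packet_seq - 1, highest_seen);
}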
2001
2002 /* see also bio_flags_to_wire()
2003  * DRBD_REQ_*: we need to semantically map the bio flags to data packet
2004  * flags and back, because we may replicate to other kernel versions. */
2005 static unsigned long wire_flags_to_bio(struct drbd_device *device, u32 dpf)
2006 {
2007 return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2008 (dpf & DP_FUA ? REQ_FUA : 0) |
2009 (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2010 (dpf & DP_DISCARD ? REQ_DISCARD : 0);
2011 }
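
/*
 * Illustrative note: a peer write received with dp_flags containing
 * DP_FUA | DP_FLUSH is submitted locally with REQ_FUA | REQ_FLUSH set on the
 * bio (in addition to the WRITE bit already present in receive_Data()'s rw),
 * and DP_DISCARD maps to REQ_DISCARD.
 */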
2012
2013 static void fail_postponed_requests(struct drbd_device *device, sector_t sector,
2014 unsigned int size)
2015 {
2016 struct drbd_interval *i;
2017
2018 repeat:
2019 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2020 struct drbd_request *req;
2021 struct bio_and_error m;
2022
2023 if (!i->local)
2024 continue;
2025 req = container_of(i, struct drbd_request, i);
2026 if (!(req->rq_state & RQ_POSTPONED))
2027 continue;
2028 req->rq_state &= ~RQ_POSTPONED;
2029 __req_mod(req, NEG_ACKED, &m);
2030 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2031 if (m.bio)
2032 complete_master_bio(device, &m);
2033 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2034 goto repeat;
2035 }
2036 }
2037
2038 static int handle_write_conflicts(struct drbd_device *device,
2039 struct drbd_peer_request *peer_req)
2040 {
2041 struct drbd_connection *connection = first_peer_device(device)->connection;
2042 bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &connection->flags);
2043 sector_t sector = peer_req->i.sector;
2044 const unsigned int size = peer_req->i.size;
2045 struct drbd_interval *i;
2046 bool equal;
2047 int err;
2048
2049 /*
2050 * Inserting the peer request into the write_requests tree will prevent
2051 * new conflicting local requests from being added.
2052 */
2053 drbd_insert_interval(&device->write_requests, &peer_req->i);
2054
2055 repeat:
2056 drbd_for_each_overlap(i, &device->write_requests, sector, size) {
2057 if (i == &peer_req->i)
2058 continue;
2059
2060 if (!i->local) {
2061 /*
2062 * Our peer has sent a conflicting remote request; this
2063 * should not happen in a two-node setup. Wait for the
2064 * earlier peer request to complete.
2065 */
2066 err = drbd_wait_misc(device, i);
2067 if (err)
2068 goto out;
2069 goto repeat;
2070 }
2071
2072 equal = i->sector == sector && i->size == size;
2073 if (resolve_conflicts) {
2074 /*
2075 * If the peer request is fully contained within the
2076 * overlapping request, it can be considered overwritten
2077 * and thus superseded; otherwise, it will be retried
2078 * once all overlapping requests have completed.
2079 */
2080 bool superseded = i->sector <= sector && i->sector +
2081 (i->size >> 9) >= sector + (size >> 9);
2082
2083 if (!equal)
2084 dev_alert(DEV, "Concurrent writes detected: "
2085 "local=%llus +%u, remote=%llus +%u, "
2086 "assuming %s came first\n",
2087 (unsigned long long)i->sector, i->size,
2088 (unsigned long long)sector, size,
2089 superseded ? "local" : "remote");
2090
2091 inc_unacked(device);
2092 peer_req->w.cb = superseded ? e_send_superseded :
2093 e_send_retry_write;
2094 list_add_tail(&peer_req->w.list, &device->done_ee);
2095 wake_asender(first_peer_device(device)->connection);
2096
2097 err = -ENOENT;
2098 goto out;
2099 } else {
2100 struct drbd_request *req =
2101 container_of(i, struct drbd_request, i);
2102
2103 if (!equal)
2104 dev_alert(DEV, "Concurrent writes detected: "
2105 "local=%llus +%u, remote=%llus +%u\n",
2106 (unsigned long long)i->sector, i->size,
2107 (unsigned long long)sector, size);
2108
2109 if (req->rq_state & RQ_LOCAL_PENDING ||
2110 !(req->rq_state & RQ_POSTPONED)) {
2111 /*
2112 * Wait for the node with the discard flag to
2113 * decide if this request has been superseded
2114 * or needs to be retried.
2115 * Requests that have been superseded will
2116 * disappear from the write_requests tree.
2117 *
2118 * In addition, wait for the conflicting
2119 * request to finish locally before submitting
2120 * the conflicting peer request.
2121 */
2122 err = drbd_wait_misc(device, &req->i);
2123 if (err) {
2124 _conn_request_state(first_peer_device(device)->connection,
2125 NS(conn, C_TIMEOUT),
2126 CS_HARD);
2127 fail_postponed_requests(device, sector, size);
2128 goto out;
2129 }
2130 goto repeat;
2131 }
2132 /*
2133 * Remember to restart the conflicting requests after
2134 * the new peer request has completed.
2135 */
2136 peer_req->flags |= EE_RESTART_REQUESTS;
2137 }
2138 }
2139 err = 0;
2140
2141 out:
2142 if (err)
2143 drbd_remove_epoch_entry_interval(device, peer_req);
2144 return err;
2145 }
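
/*
 * Illustrative sketch: a worked example of the "superseded" containment test
 * in handle_write_conflicts() above.  Suppose a postponed local write covers
 * sectors 0..15 (sector 0, size 8192) and the incoming peer write covers
 * sectors 4..11 (sector 4, size 4096).  Then
 *
 *	i->sector (0) <= sector (4), and
 *	i->sector + (i->size >> 9) (16) >= sector + (size >> 9) (12),
 *
 * so the peer write lies entirely inside the local one and is answered with
 * P_SUPERSEDED; a peer write that is not fully contained is instead asked to
 * be retried (P_RETRY_WRITE with protocol 100 and later).
 */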
2146
2147 /* mirrored write */
2148 static int receive_Data(struct drbd_connection *connection, struct packet_info *pi)
2149 {
2150 struct drbd_device *device;
2151 sector_t sector;
2152 struct drbd_peer_request *peer_req;
2153 struct p_data *p = pi->data;
2154 u32 peer_seq = be32_to_cpu(p->seq_num);
2155 int rw = WRITE;
2156 u32 dp_flags;
2157 int err, tp;
2158
2159 device = vnr_to_device(connection, pi->vnr);
2160 if (!device)
2161 return -EIO;
2162
2163 if (!get_ldev(device)) {
2164 int err2;
2165
2166 err = wait_for_and_update_peer_seq(device, peer_seq);
2167 drbd_send_ack_dp(device, P_NEG_ACK, p, pi->size);
2168 atomic_inc(&connection->current_epoch->epoch_size);
2169 err2 = drbd_drain_block(device, pi->size);
2170 if (!err)
2171 err = err2;
2172 return err;
2173 }
2174
2175 /*
2176 * Corresponding put_ldev done either below (on various errors), or in
2177 * drbd_peer_request_endio, if we successfully submit the data at the
2178 * end of this function.
2179 */
2180
2181 sector = be64_to_cpu(p->sector);
2182 peer_req = read_in_block(device, p->block_id, sector, pi->size);
2183 if (!peer_req) {
2184 put_ldev(device);
2185 return -EIO;
2186 }
2187
2188 peer_req->w.cb = e_end_block;
2189
2190 dp_flags = be32_to_cpu(p->dp_flags);
2191 rw |= wire_flags_to_bio(device, dp_flags);
2192 if (peer_req->pages == NULL) {
2193 D_ASSERT(peer_req->i.size == 0);
2194 D_ASSERT(dp_flags & DP_FLUSH);
2195 }
2196
2197 if (dp_flags & DP_MAY_SET_IN_SYNC)
2198 peer_req->flags |= EE_MAY_SET_IN_SYNC;
2199
2200 spin_lock(&connection->epoch_lock);
2201 peer_req->epoch = connection->current_epoch;
2202 atomic_inc(&peer_req->epoch->epoch_size);
2203 atomic_inc(&peer_req->epoch->active);
2204 spin_unlock(&connection->epoch_lock);
2205
2206 rcu_read_lock();
2207 tp = rcu_dereference(first_peer_device(device)->connection->net_conf)->two_primaries;
2208 rcu_read_unlock();
2209 if (tp) {
2210 peer_req->flags |= EE_IN_INTERVAL_TREE;
2211 err = wait_for_and_update_peer_seq(device, peer_seq);
2212 if (err)
2213 goto out_interrupted;
2214 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2215 err = handle_write_conflicts(device, peer_req);
2216 if (err) {
2217 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2218 if (err == -ENOENT) {
2219 put_ldev(device);
2220 return 0;
2221 }
2222 goto out_interrupted;
2223 }
2224 } else {
2225 update_peer_seq(device, peer_seq);
2226 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2227 }
2228 list_add(&peer_req->w.list, &device->active_ee);
2229 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2230
2231 if (device->state.conn == C_SYNC_TARGET)
2232 wait_event(device->ee_wait, !overlapping_resync_write(device, peer_req));
2233
2234 if (first_peer_device(device)->connection->agreed_pro_version < 100) {
2235 rcu_read_lock();
2236 switch (rcu_dereference(first_peer_device(device)->connection->net_conf)->wire_protocol) {
2237 case DRBD_PROT_C:
2238 dp_flags |= DP_SEND_WRITE_ACK;
2239 break;
2240 case DRBD_PROT_B:
2241 dp_flags |= DP_SEND_RECEIVE_ACK;
2242 break;
2243 }
2244 rcu_read_unlock();
2245 }
2246
2247 if (dp_flags & DP_SEND_WRITE_ACK) {
2248 peer_req->flags |= EE_SEND_WRITE_ACK;
2249 inc_unacked(device);
2250 /* corresponding dec_unacked() in e_end_block()
2251 * respective _drbd_clear_done_ee */
2252 }
2253
2254 if (dp_flags & DP_SEND_RECEIVE_ACK) {
2255 /* I really don't like it that the receiver thread
2256 * sends on the msock, but anyways */
2257 drbd_send_ack(device, P_RECV_ACK, peer_req);
2258 }
2259
2260 if (device->state.pdsk < D_INCONSISTENT) {
2261 		/* In case we have the only disk of the cluster, note the range as out of sync towards the peer. */
2262 drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
2263 peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2264 peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2265 drbd_al_begin_io(device, &peer_req->i, true);
2266 }
2267
2268 err = drbd_submit_peer_request(device, peer_req, rw, DRBD_FAULT_DT_WR);
2269 if (!err)
2270 return 0;
2271
2272 /* don't care for the reason here */
2273 dev_err(DEV, "submit failed, triggering re-connect\n");
2274 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2275 list_del(&peer_req->w.list);
2276 drbd_remove_epoch_entry_interval(device, peer_req);
2277 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2278 if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2279 drbd_al_complete_io(device, &peer_req->i);
2280
2281 out_interrupted:
2282 drbd_may_finish_epoch(connection, peer_req->epoch, EV_PUT + EV_CLEANUP);
2283 put_ldev(device);
2284 drbd_free_peer_req(device, peer_req);
2285 return err;
2286 }
2287
2288 /* We may throttle resync, if the lower device seems to be busy,
2289 * and current sync rate is above c_min_rate.
2290 *
2291 * To decide whether or not the lower device is busy, we use a scheme similar
2292 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2293 * (more than 64 sectors) of activity we cannot account for with our own resync
2294 * activity, it obviously is "busy".
2295 *
2296 * The current sync rate used here uses only the most recent two step marks,
2297 * to have a short time average so we can react faster.
2298 */
2299 int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector)
2300 {
2301 struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
2302 unsigned long db, dt, dbdt;
2303 struct lc_element *tmp;
2304 int curr_events;
2305 int throttle = 0;
2306 unsigned int c_min_rate;
2307
2308 rcu_read_lock();
2309 c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate;
2310 rcu_read_unlock();
2311
2312 /* feature disabled? */
2313 if (c_min_rate == 0)
2314 return 0;
2315
2316 spin_lock_irq(&device->al_lock);
2317 tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector));
2318 if (tmp) {
2319 struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2320 if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2321 spin_unlock_irq(&device->al_lock);
2322 return 0;
2323 }
2324 /* Do not slow down if app IO is already waiting for this extent */
2325 }
2326 spin_unlock_irq(&device->al_lock);
2327
2328 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2329 (int)part_stat_read(&disk->part0, sectors[1]) -
2330 atomic_read(&device->rs_sect_ev);
2331
2332 if (!device->rs_last_events || curr_events - device->rs_last_events > 64) {
2333 unsigned long rs_left;
2334 int i;
2335
2336 device->rs_last_events = curr_events;
2337
2338 /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2339 * approx. */
2340 i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2341
2342 if (device->state.conn == C_VERIFY_S || device->state.conn == C_VERIFY_T)
2343 rs_left = device->ov_left;
2344 else
2345 rs_left = drbd_bm_total_weight(device) - device->rs_failed;
2346
2347 dt = ((long)jiffies - (long)device->rs_mark_time[i]) / HZ;
2348 if (!dt)
2349 dt++;
2350 db = device->rs_mark_left[i] - rs_left;
2351 dbdt = Bit2KB(db/dt);
2352
2353 if (dbdt > c_min_rate)
2354 throttle = 1;
2355 }
2356 return throttle;
2357 }
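
/*
 * Illustrative sketch, assuming the usual 4 KiB of data per bitmap bit: if
 * the retained sync mark used above is 3 seconds old (dt = 3) and db = 38400
 * bits of resync work were completed since then, the short-term rate is
 * dbdt = Bit2KB(38400 / 3) = 12800 * 4 = 51200 KiB/s.  With c_min_rate set to
 * 25000 KiB/s this exceeds the threshold, so resync requests are throttled,
 * but only while the unaccounted activity on the backing device exceeded
 * 64 sectors since the last check.
 */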
2358
2359
2360 static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi)
2361 {
2362 struct drbd_device *device;
2363 sector_t sector;
2364 sector_t capacity;
2365 struct drbd_peer_request *peer_req;
2366 struct digest_info *di = NULL;
2367 int size, verb;
2368 unsigned int fault_type;
2369 struct p_block_req *p = pi->data;
2370
2371 device = vnr_to_device(connection, pi->vnr);
2372 if (!device)
2373 return -EIO;
2374 capacity = drbd_get_capacity(device->this_bdev);
2375
2376 sector = be64_to_cpu(p->sector);
2377 size = be32_to_cpu(p->blksize);
2378
2379 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2380 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2381 (unsigned long long)sector, size);
2382 return -EINVAL;
2383 }
2384 if (sector + (size>>9) > capacity) {
2385 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2386 (unsigned long long)sector, size);
2387 return -EINVAL;
2388 }
2389
2390 if (!get_ldev_if_state(device, D_UP_TO_DATE)) {
2391 verb = 1;
2392 switch (pi->cmd) {
2393 case P_DATA_REQUEST:
2394 drbd_send_ack_rp(device, P_NEG_DREPLY, p);
2395 break;
2396 case P_RS_DATA_REQUEST:
2397 case P_CSUM_RS_REQUEST:
2398 case P_OV_REQUEST:
2399 drbd_send_ack_rp(device, P_NEG_RS_DREPLY , p);
2400 break;
2401 case P_OV_REPLY:
2402 verb = 0;
2403 dec_rs_pending(device);
2404 drbd_send_ack_ex(device, P_OV_RESULT, sector, size, ID_IN_SYNC);
2405 break;
2406 default:
2407 BUG();
2408 }
2409 if (verb && __ratelimit(&drbd_ratelimit_state))
2410 dev_err(DEV, "Can not satisfy peer's read request, "
2411 "no local data.\n");
2412
2413 	/* drain the payload, if any */
2414 return drbd_drain_block(device, pi->size);
2415 }
2416
2417 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2418 * "criss-cross" setup, that might cause write-out on some other DRBD,
2419 * which in turn might block on the other node at this very place. */
2420 peer_req = drbd_alloc_peer_req(device, p->block_id, sector, size, GFP_NOIO);
2421 if (!peer_req) {
2422 put_ldev(device);
2423 return -ENOMEM;
2424 }
2425
2426 switch (pi->cmd) {
2427 case P_DATA_REQUEST:
2428 peer_req->w.cb = w_e_end_data_req;
2429 fault_type = DRBD_FAULT_DT_RD;
2430 /* application IO, don't drbd_rs_begin_io */
2431 goto submit;
2432
2433 case P_RS_DATA_REQUEST:
2434 peer_req->w.cb = w_e_end_rsdata_req;
2435 fault_type = DRBD_FAULT_RS_RD;
2436 /* used in the sector offset progress display */
2437 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2438 break;
2439
2440 case P_OV_REPLY:
2441 case P_CSUM_RS_REQUEST:
2442 fault_type = DRBD_FAULT_RS_RD;
2443 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2444 if (!di)
2445 goto out_free_e;
2446
2447 di->digest_size = pi->size;
2448 di->digest = (((char *)di)+sizeof(struct digest_info));
2449
2450 peer_req->digest = di;
2451 peer_req->flags |= EE_HAS_DIGEST;
2452
2453 if (drbd_recv_all(first_peer_device(device)->connection, di->digest, pi->size))
2454 goto out_free_e;
2455
2456 if (pi->cmd == P_CSUM_RS_REQUEST) {
2457 D_ASSERT(first_peer_device(device)->connection->agreed_pro_version >= 89);
2458 peer_req->w.cb = w_e_end_csum_rs_req;
2459 /* used in the sector offset progress display */
2460 device->bm_resync_fo = BM_SECT_TO_BIT(sector);
2461 } else if (pi->cmd == P_OV_REPLY) {
2462 /* track progress, we may need to throttle */
2463 atomic_add(size >> 9, &device->rs_sect_in);
2464 peer_req->w.cb = w_e_end_ov_reply;
2465 dec_rs_pending(device);
2466 /* drbd_rs_begin_io done when we sent this request,
2467 * but accounting still needs to be done. */
2468 goto submit_for_resync;
2469 }
2470 break;
2471
2472 case P_OV_REQUEST:
2473 if (device->ov_start_sector == ~(sector_t)0 &&
2474 first_peer_device(device)->connection->agreed_pro_version >= 90) {
2475 unsigned long now = jiffies;
2476 int i;
2477 device->ov_start_sector = sector;
2478 device->ov_position = sector;
2479 device->ov_left = drbd_bm_bits(device) - BM_SECT_TO_BIT(sector);
2480 device->rs_total = device->ov_left;
2481 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2482 device->rs_mark_left[i] = device->ov_left;
2483 device->rs_mark_time[i] = now;
2484 }
2485 dev_info(DEV, "Online Verify start sector: %llu\n",
2486 (unsigned long long)sector);
2487 }
2488 peer_req->w.cb = w_e_end_ov_req;
2489 fault_type = DRBD_FAULT_RS_RD;
2490 break;
2491
2492 default:
2493 BUG();
2494 }
2495
2496 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2497 * wrt the receiver, but it is not as straightforward as it may seem.
2498 * Various places in the resync start and stop logic assume resync
2499 * requests are processed in order, requeuing this on the worker thread
2500 * introduces a bunch of new code for synchronization between threads.
2501 *
2502 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2503 * "forever", throttling after drbd_rs_begin_io will lock that extent
2504 * for application writes for the same time. For now, just throttle
2505 * here, where the rest of the code expects the receiver to sleep for
2506 * a while, anyways.
2507 */
2508
2509 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2510 * this defers syncer requests for some time, before letting at least
2511  * one request through. The resync controller on the receiving side
2512 * will adapt to the incoming rate accordingly.
2513 *
2514 * We cannot throttle here if remote is Primary/SyncTarget:
2515 * we would also throttle its application reads.
2516 * In that case, throttling is done on the SyncTarget only.
2517 */
2518 if (device->state.peer != R_PRIMARY && drbd_rs_should_slow_down(device, sector))
2519 schedule_timeout_uninterruptible(HZ/10);
2520 if (drbd_rs_begin_io(device, sector))
2521 goto out_free_e;
2522
2523 submit_for_resync:
2524 atomic_add(size >> 9, &device->rs_sect_ev);
2525
2526 submit:
2527 inc_unacked(device);
2528 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2529 list_add_tail(&peer_req->w.list, &device->read_ee);
2530 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2531
2532 if (drbd_submit_peer_request(device, peer_req, READ, fault_type) == 0)
2533 return 0;
2534
2535 /* don't care for the reason here */
2536 dev_err(DEV, "submit failed, triggering re-connect\n");
2537 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
2538 list_del(&peer_req->w.list);
2539 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
2540 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2541
2542 out_free_e:
2543 put_ldev(device);
2544 drbd_free_peer_req(device, peer_req);
2545 return -EIO;
2546 }
2547
2548 static int drbd_asb_recover_0p(struct drbd_device *device) __must_hold(local)
2549 {
2550 int self, peer, rv = -100;
2551 unsigned long ch_self, ch_peer;
2552 enum drbd_after_sb_p after_sb_0p;
2553
2554 self = device->ldev->md.uuid[UI_BITMAP] & 1;
2555 peer = device->p_uuid[UI_BITMAP] & 1;
2556
2557 ch_peer = device->p_uuid[UI_SIZE];
2558 ch_self = device->comm_bm_set;
2559
2560 rcu_read_lock();
2561 after_sb_0p = rcu_dereference(first_peer_device(device)->connection->net_conf)->after_sb_0p;
2562 rcu_read_unlock();
2563 switch (after_sb_0p) {
2564 case ASB_CONSENSUS:
2565 case ASB_DISCARD_SECONDARY:
2566 case ASB_CALL_HELPER:
2567 case ASB_VIOLENTLY:
2568 dev_err(DEV, "Configuration error.\n");
2569 break;
2570 case ASB_DISCONNECT:
2571 break;
2572 case ASB_DISCARD_YOUNGER_PRI:
2573 if (self == 0 && peer == 1) {
2574 rv = -1;
2575 break;
2576 }
2577 if (self == 1 && peer == 0) {
2578 rv = 1;
2579 break;
2580 }
2581 /* Else fall through to one of the other strategies... */
2582 case ASB_DISCARD_OLDER_PRI:
2583 if (self == 0 && peer == 1) {
2584 rv = 1;
2585 break;
2586 }
2587 if (self == 1 && peer == 0) {
2588 rv = -1;
2589 break;
2590 }
2591 /* Else fall through to one of the other strategies... */
2592 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2593 "Using discard-least-changes instead\n");
2594 case ASB_DISCARD_ZERO_CHG:
2595 if (ch_peer == 0 && ch_self == 0) {
2596 rv = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags)
2597 ? -1 : 1;
2598 break;
2599 } else {
2600 if (ch_peer == 0) { rv = 1; break; }
2601 if (ch_self == 0) { rv = -1; break; }
2602 }
2603 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2604 break;
2605 case ASB_DISCARD_LEAST_CHG:
2606 if (ch_self < ch_peer)
2607 rv = -1;
2608 else if (ch_self > ch_peer)
2609 rv = 1;
2610 else /* ( ch_self == ch_peer ) */
2611 /* Well, then use something else. */
2612 rv = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags)
2613 ? -1 : 1;
2614 break;
2615 case ASB_DISCARD_LOCAL:
2616 rv = -1;
2617 break;
2618 case ASB_DISCARD_REMOTE:
2619 rv = 1;
2620 }
2621
2622 return rv;
2623 }
2624
2625 static int drbd_asb_recover_1p(struct drbd_device *device) __must_hold(local)
2626 {
2627 int hg, rv = -100;
2628 enum drbd_after_sb_p after_sb_1p;
2629
2630 rcu_read_lock();
2631 after_sb_1p = rcu_dereference(first_peer_device(device)->connection->net_conf)->after_sb_1p;
2632 rcu_read_unlock();
2633 switch (after_sb_1p) {
2634 case ASB_DISCARD_YOUNGER_PRI:
2635 case ASB_DISCARD_OLDER_PRI:
2636 case ASB_DISCARD_LEAST_CHG:
2637 case ASB_DISCARD_LOCAL:
2638 case ASB_DISCARD_REMOTE:
2639 case ASB_DISCARD_ZERO_CHG:
2640 dev_err(DEV, "Configuration error.\n");
2641 break;
2642 case ASB_DISCONNECT:
2643 break;
2644 case ASB_CONSENSUS:
2645 hg = drbd_asb_recover_0p(device);
2646 if (hg == -1 && device->state.role == R_SECONDARY)
2647 rv = hg;
2648 if (hg == 1 && device->state.role == R_PRIMARY)
2649 rv = hg;
2650 break;
2651 case ASB_VIOLENTLY:
2652 rv = drbd_asb_recover_0p(device);
2653 break;
2654 case ASB_DISCARD_SECONDARY:
2655 return device->state.role == R_PRIMARY ? 1 : -1;
2656 case ASB_CALL_HELPER:
2657 hg = drbd_asb_recover_0p(device);
2658 if (hg == -1 && device->state.role == R_PRIMARY) {
2659 enum drbd_state_rv rv2;
2660
2661 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2662 * we might be here in C_WF_REPORT_PARAMS which is transient.
2663 * we do not need to wait for the after state change work either. */
2664 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
2665 if (rv2 != SS_SUCCESS) {
2666 drbd_khelper(device, "pri-lost-after-sb");
2667 } else {
2668 dev_warn(DEV, "Successfully gave up primary role.\n");
2669 rv = hg;
2670 }
2671 } else
2672 rv = hg;
2673 }
2674
2675 return rv;
2676 }
2677
2678 static int drbd_asb_recover_2p(struct drbd_device *device) __must_hold(local)
2679 {
2680 int hg, rv = -100;
2681 enum drbd_after_sb_p after_sb_2p;
2682
2683 rcu_read_lock();
2684 after_sb_2p = rcu_dereference(first_peer_device(device)->connection->net_conf)->after_sb_2p;
2685 rcu_read_unlock();
2686 switch (after_sb_2p) {
2687 case ASB_DISCARD_YOUNGER_PRI:
2688 case ASB_DISCARD_OLDER_PRI:
2689 case ASB_DISCARD_LEAST_CHG:
2690 case ASB_DISCARD_LOCAL:
2691 case ASB_DISCARD_REMOTE:
2692 case ASB_CONSENSUS:
2693 case ASB_DISCARD_SECONDARY:
2694 case ASB_DISCARD_ZERO_CHG:
2695 dev_err(DEV, "Configuration error.\n");
2696 break;
2697 case ASB_VIOLENTLY:
2698 rv = drbd_asb_recover_0p(device);
2699 break;
2700 case ASB_DISCONNECT:
2701 break;
2702 case ASB_CALL_HELPER:
2703 hg = drbd_asb_recover_0p(device);
2704 if (hg == -1) {
2705 enum drbd_state_rv rv2;
2706
2707 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2708 * we might be here in C_WF_REPORT_PARAMS which is transient.
2709 * we do not need to wait for the after state change work either. */
2710 rv2 = drbd_change_state(device, CS_VERBOSE, NS(role, R_SECONDARY));
2711 if (rv2 != SS_SUCCESS) {
2712 drbd_khelper(device, "pri-lost-after-sb");
2713 } else {
2714 dev_warn(DEV, "Successfully gave up primary role.\n");
2715 rv = hg;
2716 }
2717 } else
2718 rv = hg;
2719 }
2720
2721 return rv;
2722 }
2723
2724 static void drbd_uuid_dump(struct drbd_device *device, char *text, u64 *uuid,
2725 u64 bits, u64 flags)
2726 {
2727 if (!uuid) {
2728 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2729 return;
2730 }
2731 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2732 text,
2733 (unsigned long long)uuid[UI_CURRENT],
2734 (unsigned long long)uuid[UI_BITMAP],
2735 (unsigned long long)uuid[UI_HISTORY_START],
2736 (unsigned long long)uuid[UI_HISTORY_END],
2737 (unsigned long long)bits,
2738 (unsigned long long)flags);
2739 }
2740
2741 /*
2742 100 after split brain try auto recover
2743 2 C_SYNC_SOURCE set BitMap
2744 1 C_SYNC_SOURCE use BitMap
2745 0 no Sync
2746 -1 C_SYNC_TARGET use BitMap
2747 -2 C_SYNC_TARGET set BitMap
2748 -100 after split brain, disconnect
2749 -1000 unrelated data
2750 -1091 requires proto 91
2751 -1096 requires proto 96
2752 */
2753 static int drbd_uuid_compare(struct drbd_device *device, int *rule_nr) __must_hold(local)
2754 {
2755 u64 self, peer;
2756 int i, j;
2757
2758 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2759 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
2760
2761 *rule_nr = 10;
2762 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2763 return 0;
2764
2765 *rule_nr = 20;
2766 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2767 peer != UUID_JUST_CREATED)
2768 return -2;
2769
2770 *rule_nr = 30;
2771 if (self != UUID_JUST_CREATED &&
2772 (peer == UUID_JUST_CREATED || peer == (u64)0))
2773 return 2;
2774
2775 if (self == peer) {
2776 int rct, dc; /* roles at crash time */
2777
2778 if (device->p_uuid[UI_BITMAP] == (u64)0 && device->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2779
2780 if (first_peer_device(device)->connection->agreed_pro_version < 91)
2781 return -1091;
2782
2783 if ((device->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2784 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2785 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2786 drbd_uuid_move_history(device);
2787 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
2788 device->ldev->md.uuid[UI_BITMAP] = 0;
2789
2790 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
2791 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
2792 *rule_nr = 34;
2793 } else {
2794 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2795 *rule_nr = 36;
2796 }
2797
2798 return 1;
2799 }
2800
2801 if (device->ldev->md.uuid[UI_BITMAP] == (u64)0 && device->p_uuid[UI_BITMAP] != (u64)0) {
2802
2803 if (first_peer_device(device)->connection->agreed_pro_version < 91)
2804 return -1091;
2805
2806 if ((device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (device->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2807 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (device->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2808 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2809
2810 device->p_uuid[UI_HISTORY_START + 1] = device->p_uuid[UI_HISTORY_START];
2811 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_BITMAP];
2812 device->p_uuid[UI_BITMAP] = 0UL;
2813
2814 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
2815 *rule_nr = 35;
2816 } else {
2817 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2818 *rule_nr = 37;
2819 }
2820
2821 return -1;
2822 }
2823
2824 /* Common power [off|failure] */
2825 rct = (test_bit(CRASHED_PRIMARY, &device->flags) ? 1 : 0) +
2826 (device->p_uuid[UI_FLAGS] & 2);
2827 /* lowest bit is set when we were primary,
2828 * next bit (weight 2) is set when peer was primary */
2829 *rule_nr = 40;
2830
2831 switch (rct) {
2832 case 0: /* !self_pri && !peer_pri */ return 0;
2833 case 1: /* self_pri && !peer_pri */ return 1;
2834 case 2: /* !self_pri && peer_pri */ return -1;
2835 case 3: /* self_pri && peer_pri */
2836 dc = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2837 return dc ? -1 : 1;
2838 }
2839 }
2840
2841 *rule_nr = 50;
2842 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
2843 if (self == peer)
2844 return -1;
2845
2846 *rule_nr = 51;
2847 peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
2848 if (self == peer) {
2849 if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
2850 (device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2851 (device->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2852 peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
2853 			/* The last P_SYNC_UUID did not get through. Undo the modifications of
2854 			   the peer's UUIDs from its last start of a resync as sync source. */
2855
2856 if (first_peer_device(device)->connection->agreed_pro_version < 91)
2857 return -1091;
2858
2859 device->p_uuid[UI_BITMAP] = device->p_uuid[UI_HISTORY_START];
2860 device->p_uuid[UI_HISTORY_START] = device->p_uuid[UI_HISTORY_START + 1];
2861
2862 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
2863 drbd_uuid_dump(device, "peer", device->p_uuid, device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
2864
2865 return -1;
2866 }
2867 }
2868
2869 *rule_nr = 60;
2870 self = device->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2871 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2872 peer = device->p_uuid[i] & ~((u64)1);
2873 if (self == peer)
2874 return -2;
2875 }
2876
2877 *rule_nr = 70;
2878 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2879 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
2880 if (self == peer)
2881 return 1;
2882
2883 *rule_nr = 71;
2884 self = device->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2885 if (self == peer) {
2886 if (first_peer_device(device)->connection->agreed_pro_version < 96 ?
2887 (device->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2888 (device->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2889 self + UUID_NEW_BM_OFFSET == (device->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2890 			/* The last P_SYNC_UUID did not get through. Undo the modifications of
2891 			   our UUIDs from our last start of a resync as sync source. */
2892
2893 if (first_peer_device(device)->connection->agreed_pro_version < 91)
2894 return -1091;
2895
2896 __drbd_uuid_set(device, UI_BITMAP, device->ldev->md.uuid[UI_HISTORY_START]);
2897 __drbd_uuid_set(device, UI_HISTORY_START, device->ldev->md.uuid[UI_HISTORY_START + 1]);
2898
2899 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2900 drbd_uuid_dump(device, "self", device->ldev->md.uuid,
2901 device->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(device) : 0, 0);
2902
2903 return 1;
2904 }
2905 }
2906
2907
2908 *rule_nr = 80;
2909 peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
2910 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2911 self = device->ldev->md.uuid[i] & ~((u64)1);
2912 if (self == peer)
2913 return 2;
2914 }
2915
2916 *rule_nr = 90;
2917 self = device->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2918 peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
2919 if (self == peer && self != ((u64)0))
2920 return 100;
2921
2922 *rule_nr = 100;
2923 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2924 self = device->ldev->md.uuid[i] & ~((u64)1);
2925 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2926 peer = device->p_uuid[j] & ~((u64)1);
2927 if (self == peer)
2928 return -100;
2929 }
2930 }
2931
2932 return -1000;
2933 }
2934
2935 /* drbd_sync_handshake() returns the new conn state on success, or
2936    C_MASK on failure.
2937 */
2938 static enum drbd_conns drbd_sync_handshake(struct drbd_device *device, enum drbd_role peer_role,
2939 enum drbd_disk_state peer_disk) __must_hold(local)
2940 {
2941 enum drbd_conns rv = C_MASK;
2942 enum drbd_disk_state mydisk;
2943 struct net_conf *nc;
2944 int hg, rule_nr, rr_conflict, tentative;
2945
2946 mydisk = device->state.disk;
2947 if (mydisk == D_NEGOTIATING)
2948 mydisk = device->new_state_tmp.disk;
2949
2950 dev_info(DEV, "drbd_sync_handshake:\n");
2951
2952 spin_lock_irq(&device->ldev->md.uuid_lock);
2953 drbd_uuid_dump(device, "self", device->ldev->md.uuid, device->comm_bm_set, 0);
2954 drbd_uuid_dump(device, "peer", device->p_uuid,
2955 device->p_uuid[UI_SIZE], device->p_uuid[UI_FLAGS]);
2956
2957 hg = drbd_uuid_compare(device, &rule_nr);
2958 spin_unlock_irq(&device->ldev->md.uuid_lock);
2959
2960 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2961
2962 if (hg == -1000) {
2963 dev_alert(DEV, "Unrelated data, aborting!\n");
2964 return C_MASK;
2965 }
2966 if (hg < -1000) {
2967 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2968 return C_MASK;
2969 }
2970
2971 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2972 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2973 int f = (hg == -100) || abs(hg) == 2;
2974 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2975 if (f)
2976 hg = hg*2;
2977 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2978 hg > 0 ? "source" : "target");
2979 }
2980
2981 if (abs(hg) == 100)
2982 drbd_khelper(device, "initial-split-brain");
2983
2984 rcu_read_lock();
2985 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2986
2987 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2988 int pcount = (device->state.role == R_PRIMARY)
2989 + (peer_role == R_PRIMARY);
2990 int forced = (hg == -100);
2991
2992 switch (pcount) {
2993 case 0:
2994 hg = drbd_asb_recover_0p(device);
2995 break;
2996 case 1:
2997 hg = drbd_asb_recover_1p(device);
2998 break;
2999 case 2:
3000 hg = drbd_asb_recover_2p(device);
3001 break;
3002 }
3003 if (abs(hg) < 100) {
3004 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3005 "automatically solved. Sync from %s node\n",
3006 pcount, (hg < 0) ? "peer" : "this");
3007 if (forced) {
3008 dev_warn(DEV, "Doing a full sync, since"
3009 			     " UUIDs were ambiguous.\n");
3010 hg = hg*2;
3011 }
3012 }
3013 }
3014
3015 if (hg == -100) {
3016 if (test_bit(DISCARD_MY_DATA, &device->flags) && !(device->p_uuid[UI_FLAGS]&1))
3017 hg = -1;
3018 if (!test_bit(DISCARD_MY_DATA, &device->flags) && (device->p_uuid[UI_FLAGS]&1))
3019 hg = 1;
3020
3021 if (abs(hg) < 100)
3022 dev_warn(DEV, "Split-Brain detected, manually solved. "
3023 "Sync from %s node\n",
3024 (hg < 0) ? "peer" : "this");
3025 }
3026 rr_conflict = nc->rr_conflict;
3027 tentative = nc->tentative;
3028 rcu_read_unlock();
3029
3030 if (hg == -100) {
3031 /* FIXME this log message is not correct if we end up here
3032 * after an attempted attach on a diskless node.
3033 * We just refuse to attach -- well, we drop the "connection"
3034 * to that disk, in a way... */
3035 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
3036 drbd_khelper(device, "split-brain");
3037 return C_MASK;
3038 }
3039
3040 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3041 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3042 return C_MASK;
3043 }
3044
3045 if (hg < 0 && /* by intention we do not use mydisk here. */
3046 device->state.role == R_PRIMARY && device->state.disk >= D_CONSISTENT) {
3047 switch (rr_conflict) {
3048 case ASB_CALL_HELPER:
3049 drbd_khelper(device, "pri-lost");
3050 /* fall through */
3051 case ASB_DISCONNECT:
3052 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3053 return C_MASK;
3054 case ASB_VIOLENTLY:
3055 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3056 			    " assumption\n");
3057 }
3058 }
3059
3060 if (tentative || test_bit(CONN_DRY_RUN, &first_peer_device(device)->connection->flags)) {
3061 if (hg == 0)
3062 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3063 else
3064 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3065 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3066 abs(hg) >= 2 ? "full" : "bit-map based");
3067 return C_MASK;
3068 }
3069
3070 if (abs(hg) >= 2) {
3071 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3072 if (drbd_bitmap_io(device, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3073 BM_LOCKED_SET_ALLOWED))
3074 return C_MASK;
3075 }
3076
3077 if (hg > 0) { /* become sync source. */
3078 rv = C_WF_BITMAP_S;
3079 } else if (hg < 0) { /* become sync target */
3080 rv = C_WF_BITMAP_T;
3081 } else {
3082 rv = C_CONNECTED;
3083 if (drbd_bm_total_weight(device)) {
3084 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3085 drbd_bm_total_weight(device));
3086 }
3087 }
3088
3089 return rv;
3090 }
3091
3092 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3093 {
3094 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
3095 if (peer == ASB_DISCARD_REMOTE)
3096 return ASB_DISCARD_LOCAL;
3097
3098 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
3099 if (peer == ASB_DISCARD_LOCAL)
3100 return ASB_DISCARD_REMOTE;
3101
3102 /* everything else is valid if they are equal on both sides. */
3103 return peer;
3104 }
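
/*
 * Illustrative note: if the peer's config says after-sb-0pri=discard-remote,
 * then seen from this node the same policy is discard-local, so
 * receive_protocol() below accepts the pair exactly when
 * convert_after_sb(p_after_sb_0p) == nc->after_sb_0p.
 */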
3105
3106 static int receive_protocol(struct drbd_connection *connection, struct packet_info *pi)
3107 {
3108 struct p_protocol *p = pi->data;
3109 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3110 int p_proto, p_discard_my_data, p_two_primaries, cf;
3111 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3112 char integrity_alg[SHARED_SECRET_MAX] = "";
3113 struct crypto_hash *peer_integrity_tfm = NULL;
3114 void *int_dig_in = NULL, *int_dig_vv = NULL;
3115
3116 p_proto = be32_to_cpu(p->protocol);
3117 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3118 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3119 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
3120 p_two_primaries = be32_to_cpu(p->two_primaries);
3121 cf = be32_to_cpu(p->conn_flags);
3122 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3123
3124 if (connection->agreed_pro_version >= 87) {
3125 int err;
3126
3127 if (pi->size > sizeof(integrity_alg))
3128 return -EIO;
3129 err = drbd_recv_all(connection, integrity_alg, pi->size);
3130 if (err)
3131 return err;
3132 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3133 }
3134
3135 if (pi->cmd != P_PROTOCOL_UPDATE) {
3136 clear_bit(CONN_DRY_RUN, &connection->flags);
3137
3138 if (cf & CF_DRY_RUN)
3139 set_bit(CONN_DRY_RUN, &connection->flags);
3140
3141 rcu_read_lock();
3142 nc = rcu_dereference(connection->net_conf);
3143
3144 if (p_proto != nc->wire_protocol) {
3145 conn_err(connection, "incompatible %s settings\n", "protocol");
3146 goto disconnect_rcu_unlock;
3147 }
3148
3149 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3150 conn_err(connection, "incompatible %s settings\n", "after-sb-0pri");
3151 goto disconnect_rcu_unlock;
3152 }
3153
3154 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3155 conn_err(connection, "incompatible %s settings\n", "after-sb-1pri");
3156 goto disconnect_rcu_unlock;
3157 }
3158
3159 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3160 conn_err(connection, "incompatible %s settings\n", "after-sb-2pri");
3161 goto disconnect_rcu_unlock;
3162 }
3163
3164 if (p_discard_my_data && nc->discard_my_data) {
3165 conn_err(connection, "incompatible %s settings\n", "discard-my-data");
3166 goto disconnect_rcu_unlock;
3167 }
3168
3169 if (p_two_primaries != nc->two_primaries) {
3170 conn_err(connection, "incompatible %s settings\n", "allow-two-primaries");
3171 goto disconnect_rcu_unlock;
3172 }
3173
3174 if (strcmp(integrity_alg, nc->integrity_alg)) {
3175 conn_err(connection, "incompatible %s settings\n", "data-integrity-alg");
3176 goto disconnect_rcu_unlock;
3177 }
3178
3179 rcu_read_unlock();
3180 }
3181
3182 if (integrity_alg[0]) {
3183 int hash_size;
3184
3185 /*
3186 * We can only change the peer data integrity algorithm
3187 * here. Changing our own data integrity algorithm
3188 * requires that we send a P_PROTOCOL_UPDATE packet at
3189 * the same time; otherwise, the peer has no way to
3190 * tell between which packets the algorithm should
3191 * change.
3192 */
3193
3194 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3195 if (!peer_integrity_tfm) {
3196 conn_err(connection, "peer data-integrity-alg %s not supported\n",
3197 integrity_alg);
3198 goto disconnect;
3199 }
3200
3201 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3202 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3203 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3204 if (!(int_dig_in && int_dig_vv)) {
3205 conn_err(connection, "Allocation of buffers for data integrity checking failed\n");
3206 goto disconnect;
3207 }
3208 }
3209
3210 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3211 if (!new_net_conf) {
3212 conn_err(connection, "Allocation of new net_conf failed\n");
3213 goto disconnect;
3214 }
3215
3216 mutex_lock(&connection->data.mutex);
3217 mutex_lock(&connection->conf_update);
3218 old_net_conf = connection->net_conf;
3219 *new_net_conf = *old_net_conf;
3220
3221 new_net_conf->wire_protocol = p_proto;
3222 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3223 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3224 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3225 new_net_conf->two_primaries = p_two_primaries;
3226
3227 rcu_assign_pointer(connection->net_conf, new_net_conf);
3228 mutex_unlock(&connection->conf_update);
3229 mutex_unlock(&connection->data.mutex);
3230
3231 crypto_free_hash(connection->peer_integrity_tfm);
3232 kfree(connection->int_dig_in);
3233 kfree(connection->int_dig_vv);
3234 connection->peer_integrity_tfm = peer_integrity_tfm;
3235 connection->int_dig_in = int_dig_in;
3236 connection->int_dig_vv = int_dig_vv;
3237
3238 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3239 conn_info(connection, "peer data-integrity-alg: %s\n",
3240 integrity_alg[0] ? integrity_alg : "(none)");
3241
3242 synchronize_rcu();
3243 kfree(old_net_conf);
3244 return 0;
3245
3246 disconnect_rcu_unlock:
3247 rcu_read_unlock();
3248 disconnect:
3249 crypto_free_hash(peer_integrity_tfm);
3250 kfree(int_dig_in);
3251 kfree(int_dig_vv);
3252 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
3253 return -EIO;
3254 }
3255
3256 /* helper function
3257 * input: alg name, feature name
3258 * return: NULL (alg name was "")
3259 * ERR_PTR(error) if something goes wrong
3260 * or the crypto hash ptr, if it worked out ok. */
3261 static
3262 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
3263 const char *alg, const char *name)
3264 {
3265 struct crypto_hash *tfm;
3266
3267 if (!alg[0])
3268 return NULL;
3269
3270 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3271 if (IS_ERR(tfm)) {
3272 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3273 alg, name, PTR_ERR(tfm));
3274 return tfm;
3275 }
3276 return tfm;
3277 }
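
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * expected calling pattern for the three-way return value above;
 * receive_SyncParam() below is the real user.
 */
static inline struct crypto_hash *
digest_alloc_usage_example(struct drbd_device *device, const char *alg)
{
	struct crypto_hash *tfm;

	tfm = drbd_crypto_alloc_digest_safe(device, alg, "verify-alg");
	if (IS_ERR(tfm))
		return NULL;	/* allocation failed, error already logged */
	if (!tfm)
		return NULL;	/* alg name was "", keep using the old tfm */
	return tfm;		/* caller installs this as the new digest  */
}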
3278
3279 static int ignore_remaining_packet(struct drbd_connection *connection, struct packet_info *pi)
3280 {
3281 void *buffer = connection->data.rbuf;
3282 int size = pi->size;
3283
3284 while (size) {
3285 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3286 s = drbd_recv(connection, buffer, s);
3287 if (s <= 0) {
3288 if (s < 0)
3289 return s;
3290 break;
3291 }
3292 size -= s;
3293 }
3294 if (size)
3295 return -EIO;
3296 return 0;
3297 }
3298
3299 /*
3300 * config_unknown_volume - device configuration command for unknown volume
3301 *
3302 * When a device is added to an existing connection, the node on which the
3303 * device is added first will send configuration commands to its peer but the
3304 * peer will not know about the device yet. It will warn and ignore these
3305 * commands. Once the device is added on the second node, the second node will
3306 * send the same device configuration commands, but in the other direction.
3307 *
3308 * (We can also end up here if drbd is misconfigured.)
3309 */
3310 static int config_unknown_volume(struct drbd_connection *connection, struct packet_info *pi)
3311 {
3312 conn_warn(connection, "%s packet received for volume %u, which is not configured locally\n",
3313 cmdname(pi->cmd), pi->vnr);
3314 return ignore_remaining_packet(connection, pi);
3315 }
3316
3317 static int receive_SyncParam(struct drbd_connection *connection, struct packet_info *pi)
3318 {
3319 struct drbd_device *device;
3320 struct p_rs_param_95 *p;
3321 unsigned int header_size, data_size, exp_max_sz;
3322 struct crypto_hash *verify_tfm = NULL;
3323 struct crypto_hash *csums_tfm = NULL;
3324 struct net_conf *old_net_conf, *new_net_conf = NULL;
3325 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3326 const int apv = connection->agreed_pro_version;
3327 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3328 int fifo_size = 0;
3329 int err;
3330
3331 device = vnr_to_device(connection, pi->vnr);
3332 if (!device)
3333 return config_unknown_volume(connection, pi);
3334
3335 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3336 : apv == 88 ? sizeof(struct p_rs_param)
3337 + SHARED_SECRET_MAX
3338 : apv <= 94 ? sizeof(struct p_rs_param_89)
3339 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3340
3341 if (pi->size > exp_max_sz) {
3342 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3343 pi->size, exp_max_sz);
3344 return -EIO;
3345 }
3346
3347 if (apv <= 88) {
3348 header_size = sizeof(struct p_rs_param);
3349 data_size = pi->size - header_size;
3350 } else if (apv <= 94) {
3351 header_size = sizeof(struct p_rs_param_89);
3352 data_size = pi->size - header_size;
3353 D_ASSERT(data_size == 0);
3354 } else {
3355 header_size = sizeof(struct p_rs_param_95);
3356 data_size = pi->size - header_size;
3357 D_ASSERT(data_size == 0);
3358 }
3359
3360 /* initialize verify_alg and csums_alg */
3361 p = pi->data;
3362 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3363
3364 err = drbd_recv_all(first_peer_device(device)->connection, p, header_size);
3365 if (err)
3366 return err;
3367
3368 mutex_lock(&first_peer_device(device)->connection->conf_update);
3369 old_net_conf = first_peer_device(device)->connection->net_conf;
3370 if (get_ldev(device)) {
3371 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3372 if (!new_disk_conf) {
3373 put_ldev(device);
3374 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3375 dev_err(DEV, "Allocation of new disk_conf failed\n");
3376 return -ENOMEM;
3377 }
3378
3379 old_disk_conf = device->ldev->disk_conf;
3380 *new_disk_conf = *old_disk_conf;
3381
3382 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3383 }
3384
3385 if (apv >= 88) {
3386 if (apv == 88) {
3387 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3388 dev_err(DEV, "verify-alg of wrong size, "
3389 				    "peer wants %u, accepting only up to %u bytes\n",
3390 data_size, SHARED_SECRET_MAX);
3391 err = -EIO;
3392 goto reconnect;
3393 }
3394
3395 err = drbd_recv_all(first_peer_device(device)->connection, p->verify_alg, data_size);
3396 if (err)
3397 goto reconnect;
3398 /* we expect NUL terminated string */
3399 /* but just in case someone tries to be evil */
3400 D_ASSERT(p->verify_alg[data_size-1] == 0);
3401 p->verify_alg[data_size-1] = 0;
3402
3403 } else /* apv >= 89 */ {
3404 /* we still expect NUL terminated strings */
3405 /* but just in case someone tries to be evil */
3406 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3407 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3408 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3409 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3410 }
3411
3412 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3413 if (device->state.conn == C_WF_REPORT_PARAMS) {
3414 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3415 old_net_conf->verify_alg, p->verify_alg);
3416 goto disconnect;
3417 }
3418 verify_tfm = drbd_crypto_alloc_digest_safe(device,
3419 p->verify_alg, "verify-alg");
3420 if (IS_ERR(verify_tfm)) {
3421 verify_tfm = NULL;
3422 goto disconnect;
3423 }
3424 }
3425
3426 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3427 if (device->state.conn == C_WF_REPORT_PARAMS) {
3428 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3429 old_net_conf->csums_alg, p->csums_alg);
3430 goto disconnect;
3431 }
3432 csums_tfm = drbd_crypto_alloc_digest_safe(device,
3433 p->csums_alg, "csums-alg");
3434 if (IS_ERR(csums_tfm)) {
3435 csums_tfm = NULL;
3436 goto disconnect;
3437 }
3438 }
3439
3440 if (apv > 94 && new_disk_conf) {
3441 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3442 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3443 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3444 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3445
3446 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3447 if (fifo_size != device->rs_plan_s->size) {
3448 new_plan = fifo_alloc(fifo_size);
3449 if (!new_plan) {
3450 				dev_err(DEV, "kmalloc of fifo_buffer failed\n");
3451 put_ldev(device);
3452 goto disconnect;
3453 }
3454 }
3455 }
3456
3457 if (verify_tfm || csums_tfm) {
3458 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3459 if (!new_net_conf) {
3460 dev_err(DEV, "Allocation of new net_conf failed\n");
3461 goto disconnect;
3462 }
3463
3464 *new_net_conf = *old_net_conf;
3465
3466 if (verify_tfm) {
3467 strcpy(new_net_conf->verify_alg, p->verify_alg);
3468 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3469 crypto_free_hash(first_peer_device(device)->connection->verify_tfm);
3470 first_peer_device(device)->connection->verify_tfm = verify_tfm;
3471 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3472 }
3473 if (csums_tfm) {
3474 strcpy(new_net_conf->csums_alg, p->csums_alg);
3475 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3476 crypto_free_hash(first_peer_device(device)->connection->csums_tfm);
3477 first_peer_device(device)->connection->csums_tfm = csums_tfm;
3478 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3479 }
3480 rcu_assign_pointer(connection->net_conf, new_net_conf);
3481 }
3482 }
3483
3484 if (new_disk_conf) {
3485 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3486 put_ldev(device);
3487 }
3488
3489 if (new_plan) {
3490 old_plan = device->rs_plan_s;
3491 rcu_assign_pointer(device->rs_plan_s, new_plan);
3492 }
3493
3494 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3495 synchronize_rcu();
3496 if (new_net_conf)
3497 kfree(old_net_conf);
3498 kfree(old_disk_conf);
3499 kfree(old_plan);
3500
3501 return 0;
3502
3503 reconnect:
3504 if (new_disk_conf) {
3505 put_ldev(device);
3506 kfree(new_disk_conf);
3507 }
3508 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3509 return -EIO;
3510
3511 disconnect:
3512 kfree(new_plan);
3513 if (new_disk_conf) {
3514 put_ldev(device);
3515 kfree(new_disk_conf);
3516 }
3517 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3518 /* just for completeness: actually not needed,
3519 * as this is not reached if csums_tfm was ok. */
3520 crypto_free_hash(csums_tfm);
3521 /* but free the verify_tfm again, if csums_tfm did not work out */
3522 crypto_free_hash(verify_tfm);
3523 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3524 return -EIO;
3525 }
3526
3527 /* warn if the arguments differ by more than 12.5% */
3528 static void warn_if_differ_considerably(struct drbd_device *device,
3529 const char *s, sector_t a, sector_t b)
3530 {
3531 sector_t d;
3532 if (a == 0 || b == 0)
3533 return;
3534 d = (a > b) ? (a - b) : (b - a);
3535 if (d > (a>>3) || d > (b>>3))
3536 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3537 (unsigned long long)a, (unsigned long long)b);
3538 }
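
/*
 * Illustrative note: a>>3 is a/8, so "considerable" means the two sizes
 * differ by more than 12.5% of either one; e.g. 1000 vs. 1126 sectors warns
 * (d = 126 > 1000>>3 = 125), while 1000 vs. 1100 does not.
 */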
3539
3540 static int receive_sizes(struct drbd_connection *connection, struct packet_info *pi)
3541 {
3542 struct drbd_device *device;
3543 struct p_sizes *p = pi->data;
3544 enum determine_dev_size dd = DS_UNCHANGED;
3545 sector_t p_size, p_usize, my_usize;
3546 int ldsc = 0; /* local disk size changed */
3547 enum dds_flags ddsf;
3548
3549 device = vnr_to_device(connection, pi->vnr);
3550 if (!device)
3551 return config_unknown_volume(connection, pi);
3552
3553 p_size = be64_to_cpu(p->d_size);
3554 p_usize = be64_to_cpu(p->u_size);
3555
3556 /* just store the peer's disk size for now.
3557 * we still need to figure out whether we accept that. */
3558 device->p_size = p_size;
3559
3560 if (get_ldev(device)) {
3561 rcu_read_lock();
3562 my_usize = rcu_dereference(device->ldev->disk_conf)->disk_size;
3563 rcu_read_unlock();
3564
3565 warn_if_differ_considerably(device, "lower level device sizes",
3566 p_size, drbd_get_max_capacity(device->ldev));
3567 warn_if_differ_considerably(device, "user requested size",
3568 p_usize, my_usize);
3569
3570 /* if this is the first connect, or an otherwise expected
3571 * param exchange, choose the minimum */
3572 if (device->state.conn == C_WF_REPORT_PARAMS)
3573 p_usize = min_not_zero(my_usize, p_usize);
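			/* Illustrative example: min_not_zero() takes the smaller of the
			 * two values but ignores a zero ("no explicit limit"): with
			 * my_usize == 0 and p_usize == 2000000 sectors we keep 2000000;
			 * with 1000000 vs. 2000000 we keep 1000000. */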
3574
3575 /* Never shrink a device with usable data during connect.
3576 But allow online shrinking if we are connected. */
3577 if (drbd_new_dev_size(device, device->ldev, p_usize, 0) <
3578 drbd_get_capacity(device->this_bdev) &&
3579 device->state.disk >= D_OUTDATED &&
3580 device->state.conn < C_CONNECTED) {
3581 dev_err(DEV, "The peer's disk size is too small!\n");
3582 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3583 put_ldev(device);
3584 return -EIO;
3585 }
3586
3587 if (my_usize != p_usize) {
3588 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3589
3590 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3591 if (!new_disk_conf) {
3592 dev_err(DEV, "Allocation of new disk_conf failed\n");
3593 put_ldev(device);
3594 return -ENOMEM;
3595 }
3596
3597 mutex_lock(&first_peer_device(device)->connection->conf_update);
3598 old_disk_conf = device->ldev->disk_conf;
3599 *new_disk_conf = *old_disk_conf;
3600 new_disk_conf->disk_size = p_usize;
3601
3602 rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
3603 mutex_unlock(&first_peer_device(device)->connection->conf_update);
3604 synchronize_rcu();
3605 kfree(old_disk_conf);
3606
3607 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3608 (unsigned long)my_usize);
3609 }
3610
3611 put_ldev(device);
3612 }
3613
3614 ddsf = be16_to_cpu(p->dds_flags);
3615 if (get_ldev(device)) {
3616 dd = drbd_determine_dev_size(device, ddsf, NULL);
3617 put_ldev(device);
3618 if (dd == DS_ERROR)
3619 return -EIO;
3620 drbd_md_sync(device);
3621 } else {
3622 /* I am diskless, need to accept the peer's size. */
3623 drbd_set_my_capacity(device, p_size);
3624 }
3625
3626 device->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3627 drbd_reconsider_max_bio_size(device);
3628
3629 if (get_ldev(device)) {
3630 if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) {
3631 device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
3632 ldsc = 1;
3633 }
3634
3635 put_ldev(device);
3636 }
3637
3638 if (device->state.conn > C_WF_REPORT_PARAMS) {
3639 if (be64_to_cpu(p->c_size) !=
3640 drbd_get_capacity(device->this_bdev) || ldsc) {
3641 /* we have different sizes, probably peer
3642 * needs to know my new size... */
3643 drbd_send_sizes(device, 0, ddsf);
3644 }
3645 if (test_and_clear_bit(RESIZE_PENDING, &device->flags) ||
3646 (dd == DS_GREW && device->state.conn == C_CONNECTED)) {
3647 if (device->state.pdsk >= D_INCONSISTENT &&
3648 device->state.disk >= D_INCONSISTENT) {
3649 if (ddsf & DDSF_NO_RESYNC)
3650 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3651 else
3652 resync_after_online_grow(device);
3653 } else
3654 set_bit(RESYNC_AFTER_NEG, &device->flags);
3655 }
3656 }
3657
3658 return 0;
3659 }
3660
3661 static int receive_uuids(struct drbd_connection *connection, struct packet_info *pi)
3662 {
3663 struct drbd_device *device;
3664 struct p_uuids *p = pi->data;
3665 u64 *p_uuid;
3666 int i, updated_uuids = 0;
3667
3668 device = vnr_to_device(connection, pi->vnr);
3669 if (!device)
3670 return config_unknown_volume(connection, pi);
3671
3672 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3673 if (!p_uuid) {
3674 dev_err(DEV, "kmalloc of p_uuid failed\n");
3675 return false;
3676 }
3677
3678 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3679 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3680
3681 kfree(device->p_uuid);
3682 device->p_uuid = p_uuid;
3683
3684 if (device->state.conn < C_CONNECTED &&
3685 device->state.disk < D_INCONSISTENT &&
3686 device->state.role == R_PRIMARY &&
3687 (device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3688 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3689 (unsigned long long)device->ed_uuid);
3690 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3691 return -EIO;
3692 }
3693
3694 if (get_ldev(device)) {
3695 int skip_initial_sync =
3696 device->state.conn == C_CONNECTED &&
3697 first_peer_device(device)->connection->agreed_pro_version >= 90 &&
3698 device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3699 (p_uuid[UI_FLAGS] & 8);
3700 if (skip_initial_sync) {
3701 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3702 drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
3703 "clear_n_write from receive_uuids",
3704 BM_LOCKED_TEST_ALLOWED);
3705 _drbd_uuid_set(device, UI_CURRENT, p_uuid[UI_CURRENT]);
3706 _drbd_uuid_set(device, UI_BITMAP, 0);
3707 _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3708 CS_VERBOSE, NULL);
3709 drbd_md_sync(device);
3710 updated_uuids = 1;
3711 }
3712 put_ldev(device);
3713 } else if (device->state.disk < D_INCONSISTENT &&
3714 device->state.role == R_PRIMARY) {
3715 /* I am a diskless primary, the peer just created a new current UUID
3716 for me. */
3717 updated_uuids = drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
3718 }
3719
3720 	/* Before we test for the disk state, we should wait until any possibly
3721 	   ongoing cluster wide state change has finished. That is important if
3722 	   we are primary and are detaching from our disk. We need to see the
3723 	   new disk state... */
3724 mutex_lock(device->state_mutex);
3725 mutex_unlock(device->state_mutex);
3726 if (device->state.conn >= C_CONNECTED && device->state.disk < D_INCONSISTENT)
3727 updated_uuids |= drbd_set_ed_uuid(device, p_uuid[UI_CURRENT]);
3728
3729 if (updated_uuids)
3730 drbd_print_uuids(device, "receiver updated UUIDs to");
3731
3732 return 0;
3733 }
3734
3735 /**
3736 * convert_state() - Converts the peer's view of the cluster state to our point of view
3737 * @ps: The state as seen by the peer.
3738 */
3739 static union drbd_state convert_state(union drbd_state ps)
3740 {
3741 union drbd_state ms;
3742
3743 static enum drbd_conns c_tab[] = {
3744 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
3745 [C_CONNECTED] = C_CONNECTED,
3746
3747 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3748 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3749 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3750 [C_VERIFY_S] = C_VERIFY_T,
3751 [C_MASK] = C_MASK,
3752 };
3753
3754 ms.i = ps.i;
3755
3756 ms.conn = c_tab[ps.conn];
3757 ms.peer = ps.role;
3758 ms.role = ps.peer;
3759 ms.pdsk = ps.disk;
3760 ms.disk = ps.pdsk;
3761 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
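	/* Illustrative example (hypothetical state, not taken from the source):
	 * if the peer reports role=Primary, disk=UpToDate, pdsk=Inconsistent and
	 * conn=StartingSyncS, then from our point of view that is peer=Primary,
	 * pdsk=UpToDate, disk=Inconsistent and conn=StartingSyncT. */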
3762
3763 return ms;
3764 }
3765
3766 static int receive_req_state(struct drbd_connection *connection, struct packet_info *pi)
3767 {
3768 struct drbd_device *device;
3769 struct p_req_state *p = pi->data;
3770 union drbd_state mask, val;
3771 enum drbd_state_rv rv;
3772
3773 device = vnr_to_device(connection, pi->vnr);
3774 if (!device)
3775 return -EIO;
3776
3777 mask.i = be32_to_cpu(p->mask);
3778 val.i = be32_to_cpu(p->val);
3779
3780 if (test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags) &&
3781 mutex_is_locked(device->state_mutex)) {
3782 drbd_send_sr_reply(device, SS_CONCURRENT_ST_CHG);
3783 return 0;
3784 }
3785
3786 mask = convert_state(mask);
3787 val = convert_state(val);
3788
3789 rv = drbd_change_state(device, CS_VERBOSE, mask, val);
3790 drbd_send_sr_reply(device, rv);
3791
3792 drbd_md_sync(device);
3793
3794 return 0;
3795 }
3796
3797 static int receive_req_conn_state(struct drbd_connection *connection, struct packet_info *pi)
3798 {
3799 struct p_req_state *p = pi->data;
3800 union drbd_state mask, val;
3801 enum drbd_state_rv rv;
3802
3803 mask.i = be32_to_cpu(p->mask);
3804 val.i = be32_to_cpu(p->val);
3805
3806 if (test_bit(RESOLVE_CONFLICTS, &connection->flags) &&
3807 mutex_is_locked(&connection->cstate_mutex)) {
3808 conn_send_sr_reply(connection, SS_CONCURRENT_ST_CHG);
3809 return 0;
3810 }
3811
3812 mask = convert_state(mask);
3813 val = convert_state(val);
3814
3815 rv = conn_request_state(connection, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3816 conn_send_sr_reply(connection, rv);
3817
3818 return 0;
3819 }
3820
3821 static int receive_state(struct drbd_connection *connection, struct packet_info *pi)
3822 {
3823 struct drbd_device *device;
3824 struct p_state *p = pi->data;
3825 union drbd_state os, ns, peer_state;
3826 enum drbd_disk_state real_peer_disk;
3827 enum chg_state_flags cs_flags;
3828 int rv;
3829
3830 device = vnr_to_device(connection, pi->vnr);
3831 if (!device)
3832 return config_unknown_volume(connection, pi);
3833
3834 peer_state.i = be32_to_cpu(p->state);
3835
3836 real_peer_disk = peer_state.disk;
3837 if (peer_state.disk == D_NEGOTIATING) {
3838 real_peer_disk = device->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3839 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3840 }
3841
3842 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
3843 retry:
3844 os = ns = drbd_read_state(device);
3845 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
3846
3847 /* If some other part of the code (asender thread, timeout)
3848 * already decided to close the connection again,
3849 * we must not "re-establish" it here. */
3850 if (os.conn <= C_TEAR_DOWN)
3851 return -ECONNRESET;
3852
3853 /* If this is the "end of sync" confirmation, usually the peer disk
3854 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3855 * set) resync started in PausedSyncT, or if the timing of pause-/
3856 * unpause-sync events has been "just right", the peer disk may
3857 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3858 */
3859 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3860 real_peer_disk == D_UP_TO_DATE &&
3861 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3862 /* If we are (becoming) SyncSource, but peer is still in sync
3863 * preparation, ignore its uptodate-ness to avoid flapping, it
3864 * will change to inconsistent once the peer reaches active
3865 * syncing states.
3866 * It may have changed syncer-paused flags, however, so we
3867 * cannot ignore this completely. */
3868 if (peer_state.conn > C_CONNECTED &&
3869 peer_state.conn < C_SYNC_SOURCE)
3870 real_peer_disk = D_INCONSISTENT;
3871
3872 /* if peer_state changes to connected at the same time,
3873 * it explicitly notifies us that it finished resync.
3874 * Maybe we should finish it up, too? */
3875 else if (os.conn >= C_SYNC_SOURCE &&
3876 peer_state.conn == C_CONNECTED) {
3877 if (drbd_bm_total_weight(device) <= device->rs_failed)
3878 drbd_resync_finished(device);
3879 return 0;
3880 }
3881 }
3882
3883 /* explicit verify finished notification, stop sector reached. */
3884 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3885 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
3886 ov_out_of_sync_print(device);
3887 drbd_resync_finished(device);
3888 return 0;
3889 }
3890
3891 	/* peer says its disk is inconsistent, while we think it is uptodate,
3892 	 * and this happens while the peer still thinks we have a sync going on,
3893 	 * but we think we are already done with the sync.
3894 	 * We ignore this to avoid flapping pdsk.
3895 	 * This should not happen if the peer runs a recent version of drbd. */
3896 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3897 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3898 real_peer_disk = D_UP_TO_DATE;
3899
3900 if (ns.conn == C_WF_REPORT_PARAMS)
3901 ns.conn = C_CONNECTED;
3902
3903 if (peer_state.conn == C_AHEAD)
3904 ns.conn = C_BEHIND;
3905
3906 if (device->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3907 get_ldev_if_state(device, D_NEGOTIATING)) {
3908 int cr; /* consider resync */
3909
3910 /* if we established a new connection */
3911 cr = (os.conn < C_CONNECTED);
3912 /* if we had an established connection
3913 * and one of the nodes newly attaches a disk */
3914 cr |= (os.conn == C_CONNECTED &&
3915 (peer_state.disk == D_NEGOTIATING ||
3916 os.disk == D_NEGOTIATING));
3917 /* if we have both been inconsistent, and the peer has been
3918 * forced to be UpToDate with --overwrite-data */
3919 cr |= test_bit(CONSIDER_RESYNC, &device->flags);
3920 /* if we had been plain connected, and the admin requested to
3921 * start a sync by "invalidate" or "invalidate-remote" */
3922 cr |= (os.conn == C_CONNECTED &&
3923 (peer_state.conn >= C_STARTING_SYNC_S &&
3924 peer_state.conn <= C_WF_BITMAP_T));
3925
3926 if (cr)
3927 ns.conn = drbd_sync_handshake(device, peer_state.role, real_peer_disk);
3928
3929 put_ldev(device);
3930 if (ns.conn == C_MASK) {
3931 ns.conn = C_CONNECTED;
3932 if (device->state.disk == D_NEGOTIATING) {
3933 drbd_force_state(device, NS(disk, D_FAILED));
3934 } else if (peer_state.disk == D_NEGOTIATING) {
3935 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3936 peer_state.disk = D_DISKLESS;
3937 real_peer_disk = D_DISKLESS;
3938 } else {
3939 if (test_and_clear_bit(CONN_DRY_RUN, &first_peer_device(device)->connection->flags))
3940 return -EIO;
3941 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3942 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3943 return -EIO;
3944 }
3945 }
3946 }
3947
3948 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
3949 if (os.i != drbd_read_state(device).i)
3950 goto retry;
3951 clear_bit(CONSIDER_RESYNC, &device->flags);
3952 ns.peer = peer_state.role;
3953 ns.pdsk = real_peer_disk;
3954 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3955 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3956 ns.disk = device->new_state_tmp.disk;
3957 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3958 if (ns.pdsk == D_CONSISTENT && drbd_suspended(device) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3959 test_bit(NEW_CUR_UUID, &device->flags)) {
3960 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3961 		   for temporary network outages! */
3962 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
3963 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3964 tl_clear(first_peer_device(device)->connection);
3965 drbd_uuid_new_current(device);
3966 clear_bit(NEW_CUR_UUID, &device->flags);
3967 conn_request_state(first_peer_device(device)->connection, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3968 return -EIO;
3969 }
3970 rv = _drbd_set_state(device, ns, cs_flags, NULL);
3971 ns = drbd_read_state(device);
3972 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
3973
3974 if (rv < SS_SUCCESS) {
3975 conn_request_state(first_peer_device(device)->connection, NS(conn, C_DISCONNECTING), CS_HARD);
3976 return -EIO;
3977 }
3978
3979 if (os.conn > C_WF_REPORT_PARAMS) {
3980 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3981 		    peer_state.disk != D_NEGOTIATING) {
3982 /* we want resync, peer has not yet decided to sync... */
3983 /* Nowadays only used when forcing a node into primary role and
3984 setting its disk to UpToDate with that */
3985 drbd_send_uuids(device);
3986 drbd_send_current_state(device);
3987 }
3988 }
3989
3990 clear_bit(DISCARD_MY_DATA, &device->flags);
3991
3992 drbd_md_sync(device); /* update connected indicator, la_size_sect, ... */
3993
3994 return 0;
3995 }
3996
3997 static int receive_sync_uuid(struct drbd_connection *connection, struct packet_info *pi)
3998 {
3999 struct drbd_device *device;
4000 struct p_rs_uuid *p = pi->data;
4001
4002 device = vnr_to_device(connection, pi->vnr);
4003 if (!device)
4004 return -EIO;
4005
4006 wait_event(device->misc_wait,
4007 device->state.conn == C_WF_SYNC_UUID ||
4008 device->state.conn == C_BEHIND ||
4009 device->state.conn < C_CONNECTED ||
4010 device->state.disk < D_NEGOTIATING);
4011
4012 /* D_ASSERT( device->state.conn == C_WF_SYNC_UUID ); */
4013
4014 /* Here the _drbd_uuid_ functions are right, current should
4015 _not_ be rotated into the history */
4016 if (get_ldev_if_state(device, D_NEGOTIATING)) {
4017 _drbd_uuid_set(device, UI_CURRENT, be64_to_cpu(p->uuid));
4018 _drbd_uuid_set(device, UI_BITMAP, 0UL);
4019
4020 drbd_print_uuids(device, "updated sync uuid");
4021 drbd_start_resync(device, C_SYNC_TARGET);
4022
4023 put_ldev(device);
4024 } else
4025 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4026
4027 return 0;
4028 }
4029
4030 /**
4031 * receive_bitmap_plain
4032 *
4033 * Return 0 when done, 1 when another iteration is needed, and a negative error
4034 * code upon failure.
4035 */
4036 static int
4037 receive_bitmap_plain(struct drbd_device *device, unsigned int size,
4038 unsigned long *p, struct bm_xfer_ctx *c)
4039 {
4040 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4041 drbd_header_size(first_peer_device(device)->connection);
4042 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4043 c->bm_words - c->word_offset);
4044 unsigned int want = num_words * sizeof(*p);
4045 int err;
4046
4047 if (want != size) {
4048 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
4049 return -EIO;
4050 }
4051 if (want == 0)
4052 return 0;
4053 err = drbd_recv_all(first_peer_device(device)->connection, p, want);
4054 if (err)
4055 return err;
4056
4057 drbd_bm_merge_lel(device, c->word_offset, num_words, p);
4058
4059 c->word_offset += num_words;
4060 c->bit_offset = c->word_offset * BITS_PER_LONG;
4061 if (c->bit_offset > c->bm_bits)
4062 c->bit_offset = c->bm_bits;
4063
4064 return 1;
4065 }
4066
4067 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4068 {
4069 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4070 }
4071
4072 static int dcbp_get_start(struct p_compressed_bm *p)
4073 {
4074 return (p->encoding & 0x80) != 0;
4075 }
4076
4077 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4078 {
4079 return (p->encoding >> 4) & 0x7;
4080 }
4081
4082 /**
4083 * recv_bm_rle_bits
4084 *
4085 * Return 0 when done, 1 when another iteration is needed, and a negative error
4086 * code upon failure.
4087 */
4088 static int
4089 recv_bm_rle_bits(struct drbd_device *device,
4090 struct p_compressed_bm *p,
4091 struct bm_xfer_ctx *c,
4092 unsigned int len)
4093 {
4094 struct bitstream bs;
4095 u64 look_ahead;
4096 u64 rl;
4097 u64 tmp;
4098 unsigned long s = c->bit_offset;
4099 unsigned long e;
4100 int toggle = dcbp_get_start(p);
4101 int have;
4102 int bits;
4103
4104 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4105
4106 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4107 if (bits < 0)
4108 return -EIO;
4109
4110 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4111 bits = vli_decode_bits(&rl, look_ahead);
4112 if (bits <= 0)
4113 return -EIO;
4114
4115 if (toggle) {
4116 e = s + rl -1;
4117 if (e >= c->bm_bits) {
4118 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4119 return -EIO;
4120 }
4121 _drbd_bm_set_bits(device, s, e);
4122 }
4123
4124 if (have < bits) {
4125 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4126 have, bits, look_ahead,
4127 (unsigned int)(bs.cur.b - p->code),
4128 (unsigned int)bs.buf_len);
4129 return -EIO;
4130 }
4131 /* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4132 if (likely(bits < 64))
4133 look_ahead >>= bits;
4134 else
4135 look_ahead = 0;
4136 have -= bits;
4137
4138 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4139 if (bits < 0)
4140 return -EIO;
4141 look_ahead |= tmp << have;
4142 have += bits;
4143 }
4144
4145 c->bit_offset = s;
4146 bm_xfer_ctx_bit_to_word_offset(c);
4147
4148 return (s != c->bm_bits);
4149 }
4150
4151 /**
4152 * decode_bitmap_c
4153 *
4154 * Return 0 when done, 1 when another iteration is needed, and a negative error
4155 * code upon failure.
4156 */
4157 static int
4158 decode_bitmap_c(struct drbd_device *device,
4159 struct p_compressed_bm *p,
4160 struct bm_xfer_ctx *c,
4161 unsigned int len)
4162 {
4163 if (dcbp_get_code(p) == RLE_VLI_Bits)
4164 return recv_bm_rle_bits(device, p, c, len - sizeof(*p));
4165
4166 /* other variants had been implemented for evaluation,
4167 * but have been dropped as this one turned out to be "best"
4168 * during all our tests. */
4169
4170 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4171 conn_request_state(first_peer_device(device)->connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4172 return -EIO;
4173 }
4174
4175 void INFO_bm_xfer_stats(struct drbd_device *device,
4176 const char *direction, struct bm_xfer_ctx *c)
4177 {
4178 /* what would it take to transfer it "plaintext" */
4179 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
4180 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4181 unsigned int plain =
4182 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4183 c->bm_words * sizeof(unsigned long);
4184 unsigned int total = c->bytes[0] + c->bytes[1];
4185 unsigned int r;
4186
4187 	/* total cannot be zero, but just in case: */
4188 if (total == 0)
4189 return;
4190
4191 /* don't report if not compressed */
4192 if (total >= plain)
4193 return;
4194
4195 /* total < plain. check for overflow, still */
4196 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4197 : (1000 * total / plain);
4198
4199 if (r > 1000)
4200 r = 1000;
4201
4202 r = 1000 - r;
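	/* Worked example with made-up numbers: plain = 64000 bytes and
	 * total = 960 bytes gives r = 1000 * 960 / 64000 = 15, so we report
	 * 1000 - 15 = 985 below, printed as "compression: 98.5%". */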
4203 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4204 "total %u; compression: %u.%u%%\n",
4205 direction,
4206 c->bytes[1], c->packets[1],
4207 c->bytes[0], c->packets[0],
4208 total, r/10, r % 10);
4209 }
4210
4211 /* Since we are processing the bitfield from lower addresses to higher,
4212    it does not matter whether we process it in 32 bit or 64 bit
4213    chunks, as long as it is little endian. (Understand it as a byte
4214    stream, beginning with the lowest byte...) If we used big endian,
4215    we would have to process it from the highest address to the lowest
4216    in order to stay agnostic to the 32 vs 64 bit issue.
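   For illustration: the first 64 bits of the bitmap arrive as bytes
   b0..b7; reading them as two little endian 32 bit words (b0..b3 and
   b4..b7) or as one little endian 64 bit word assigns every byte to
   the same bit positions.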
4217
4218    returns 0 on success, a negative error code otherwise. */
4219 static int receive_bitmap(struct drbd_connection *connection, struct packet_info *pi)
4220 {
4221 struct drbd_device *device;
4222 struct bm_xfer_ctx c;
4223 int err;
4224
4225 device = vnr_to_device(connection, pi->vnr);
4226 if (!device)
4227 return -EIO;
4228
4229 drbd_bm_lock(device, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4230 /* you are supposed to send additional out-of-sync information
4231 * if you actually set bits during this phase */
4232
4233 c = (struct bm_xfer_ctx) {
4234 .bm_bits = drbd_bm_bits(device),
4235 .bm_words = drbd_bm_words(device),
4236 };
4237
4238 	for (;;) {
4239 if (pi->cmd == P_BITMAP)
4240 err = receive_bitmap_plain(device, pi->size, pi->data, &c);
4241 else if (pi->cmd == P_COMPRESSED_BITMAP) {
4242 /* MAYBE: sanity check that we speak proto >= 90,
4243 * and the feature is enabled! */
4244 struct p_compressed_bm *p = pi->data;
4245
4246 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(connection)) {
4247 dev_err(DEV, "ReportCBitmap packet too large\n");
4248 err = -EIO;
4249 goto out;
4250 }
4251 if (pi->size <= sizeof(*p)) {
4252 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4253 err = -EIO;
4254 goto out;
4255 }
4256 err = drbd_recv_all(first_peer_device(device)->connection, p, pi->size);
4257 if (err)
4258 goto out;
4259 err = decode_bitmap_c(device, p, &c, pi->size);
4260 } else {
4261 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
4262 err = -EIO;
4263 goto out;
4264 }
4265
4266 c.packets[pi->cmd == P_BITMAP]++;
4267 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(connection) + pi->size;
4268
4269 if (err <= 0) {
4270 if (err < 0)
4271 goto out;
4272 break;
4273 }
4274 err = drbd_recv_header(first_peer_device(device)->connection, pi);
4275 if (err)
4276 goto out;
4277 }
4278
4279 INFO_bm_xfer_stats(device, "receive", &c);
4280
4281 if (device->state.conn == C_WF_BITMAP_T) {
4282 enum drbd_state_rv rv;
4283
4284 err = drbd_send_bitmap(device);
4285 if (err)
4286 goto out;
4287 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4288 rv = _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4289 D_ASSERT(rv == SS_SUCCESS);
4290 } else if (device->state.conn != C_WF_BITMAP_S) {
4291 /* admin may have requested C_DISCONNECTING,
4292 * other threads may have noticed network errors */
4293 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4294 drbd_conn_str(device->state.conn));
4295 }
4296 err = 0;
4297
4298 out:
4299 drbd_bm_unlock(device);
4300 if (!err && device->state.conn == C_WF_BITMAP_S)
4301 drbd_start_resync(device, C_SYNC_SOURCE);
4302 return err;
4303 }
4304
4305 static int receive_skip(struct drbd_connection *connection, struct packet_info *pi)
4306 {
4307 conn_warn(connection, "skipping unknown optional packet type %d, l: %d!\n",
4308 pi->cmd, pi->size);
4309
4310 return ignore_remaining_packet(connection, pi);
4311 }
4312
4313 static int receive_UnplugRemote(struct drbd_connection *connection, struct packet_info *pi)
4314 {
4315 /* Make sure we've acked all the TCP data associated
4316 * with the data requests being unplugged */
4317 drbd_tcp_quickack(connection->data.socket);
4318
4319 return 0;
4320 }
4321
4322 static int receive_out_of_sync(struct drbd_connection *connection, struct packet_info *pi)
4323 {
4324 struct drbd_device *device;
4325 struct p_block_desc *p = pi->data;
4326
4327 device = vnr_to_device(connection, pi->vnr);
4328 if (!device)
4329 return -EIO;
4330
4331 switch (device->state.conn) {
4332 case C_WF_SYNC_UUID:
4333 case C_WF_BITMAP_T:
4334 case C_BEHIND:
4335 break;
4336 default:
4337 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4338 drbd_conn_str(device->state.conn));
4339 }
4340
4341 drbd_set_out_of_sync(device, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4342
4343 return 0;
4344 }
4345
4346 struct data_cmd {
4347 int expect_payload;
4348 size_t pkt_size;
4349 int (*fn)(struct drbd_connection *, struct packet_info *);
4350 };
4351
4352 static struct data_cmd drbd_cmd_handler[] = {
4353 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4354 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4355 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4356 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
4357 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4358 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4359 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
4360 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4361 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4362 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4363 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
4364 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4365 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4366 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4367 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4368 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4369 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4370 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4371 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4372 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4373 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
4374 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4375 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4376 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4377 };
4378
4379 static void drbdd(struct drbd_connection *connection)
4380 {
4381 struct packet_info pi;
4382 size_t shs; /* sub header size */
4383 int err;
4384
4385 while (get_t_state(&connection->receiver) == RUNNING) {
4386 struct data_cmd *cmd;
4387
4388 drbd_thread_current_set_cpu(&connection->receiver);
4389 if (drbd_recv_header(connection, &pi))
4390 goto err_out;
4391
4392 cmd = &drbd_cmd_handler[pi.cmd];
4393 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4394 conn_err(connection, "Unexpected data packet %s (0x%04x)",
4395 cmdname(pi.cmd), pi.cmd);
4396 goto err_out;
4397 }
4398
4399 shs = cmd->pkt_size;
4400 if (pi.size > shs && !cmd->expect_payload) {
4401 conn_err(connection, "No payload expected %s l:%d\n",
4402 cmdname(pi.cmd), pi.size);
4403 goto err_out;
4404 }
4405
4406 if (shs) {
4407 err = drbd_recv_all_warn(connection, pi.data, shs);
4408 if (err)
4409 goto err_out;
4410 pi.size -= shs;
4411 }
4412
4413 err = cmd->fn(connection, &pi);
4414 if (err) {
4415 conn_err(connection, "error receiving %s, e: %d l: %d!\n",
4416 cmdname(pi.cmd), err, pi.size);
4417 goto err_out;
4418 }
4419 }
4420 return;
4421
4422 err_out:
4423 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4424 }
4425
4426 void conn_flush_workqueue(struct drbd_connection *connection)
4427 {
4428 struct drbd_wq_barrier barr;
4429
4430 barr.w.cb = w_prev_work_done;
4431 barr.w.connection = connection;
4432 init_completion(&barr.done);
4433 drbd_queue_work(&connection->sender_work, &barr.w);
4434 wait_for_completion(&barr.done);
4435 }
4436
4437 static void conn_disconnect(struct drbd_connection *connection)
4438 {
4439 struct drbd_device *device;
4440 enum drbd_conns oc;
4441 int vnr;
4442
4443 if (connection->cstate == C_STANDALONE)
4444 return;
4445
4446 /* We are about to start the cleanup after connection loss.
4447 * Make sure drbd_make_request knows about that.
4448 * Usually we should be in some network failure state already,
4449 * but just in case we are not, we fix it up here.
4450 */
4451 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4452
4453 /* asender does not clean up anything. it must not interfere, either */
4454 drbd_thread_stop(&connection->asender);
4455 drbd_free_sock(connection);
4456
4457 rcu_read_lock();
4458 idr_for_each_entry(&connection->volumes, device, vnr) {
4459 kref_get(&device->kref);
4460 rcu_read_unlock();
4461 drbd_disconnected(device);
4462 kref_put(&device->kref, &drbd_minor_destroy);
4463 rcu_read_lock();
4464 }
4465 rcu_read_unlock();
4466
4467 if (!list_empty(&connection->current_epoch->list))
4468 conn_err(connection, "ASSERTION FAILED: connection->current_epoch->list not empty\n");
4469 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4470 atomic_set(&connection->current_epoch->epoch_size, 0);
4471 connection->send.seen_any_write_yet = false;
4472
4473 conn_info(connection, "Connection closed\n");
4474
4475 if (conn_highest_role(connection) == R_PRIMARY && conn_highest_pdsk(connection) >= D_UNKNOWN)
4476 conn_try_outdate_peer_async(connection);
4477
4478 spin_lock_irq(&connection->req_lock);
4479 oc = connection->cstate;
4480 if (oc >= C_UNCONNECTED)
4481 _conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4482
4483 spin_unlock_irq(&connection->req_lock);
4484
4485 if (oc == C_DISCONNECTING)
4486 conn_request_state(connection, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
4487 }
4488
4489 static int drbd_disconnected(struct drbd_device *device)
4490 {
4491 unsigned int i;
4492
4493 /* wait for current activity to cease. */
4494 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
4495 _drbd_wait_ee_list_empty(device, &device->active_ee);
4496 _drbd_wait_ee_list_empty(device, &device->sync_ee);
4497 _drbd_wait_ee_list_empty(device, &device->read_ee);
4498 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
4499
4500 /* We do not have data structures that would allow us to
4501 * get the rs_pending_cnt down to 0 again.
4502 * * On C_SYNC_TARGET we do not have any data structures describing
4503 * the pending RSDataRequest's we have sent.
4504 * * On C_SYNC_SOURCE there is no data structure that tracks
4505 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4506 * And no, it is not the sum of the reference counts in the
4507 * resync_LRU. The resync_LRU tracks the whole operation including
4508 * the disk-IO, while the rs_pending_cnt only tracks the blocks
4509 * on the fly. */
4510 drbd_rs_cancel_all(device);
4511 device->rs_total = 0;
4512 device->rs_failed = 0;
4513 atomic_set(&device->rs_pending_cnt, 0);
4514 wake_up(&device->misc_wait);
4515
4516 del_timer_sync(&device->resync_timer);
4517 resync_timer_fn((unsigned long)device);
4518
4519 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4520 * w_make_resync_request etc. which may still be on the worker queue
4521 * to be "canceled" */
4522 drbd_flush_workqueue(device);
4523
4524 drbd_finish_peer_reqs(device);
4525
4526 /* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4527 might have issued a work again. The one before drbd_finish_peer_reqs() is
4528 	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
4529 drbd_flush_workqueue(device);
4530
4531 /* need to do it again, drbd_finish_peer_reqs() may have populated it
4532 * again via drbd_try_clear_on_disk_bm(). */
4533 drbd_rs_cancel_all(device);
4534
4535 kfree(device->p_uuid);
4536 device->p_uuid = NULL;
4537
4538 if (!drbd_suspended(device))
4539 tl_clear(first_peer_device(device)->connection);
4540
4541 drbd_md_sync(device);
4542
4543 /* serialize with bitmap writeout triggered by the state change,
4544 * if any. */
4545 wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
4546
4547 /* tcp_close and release of sendpage pages can be deferred. I don't
4548 * want to use SO_LINGER, because apparently it can be deferred for
4549 * more than 20 seconds (longest time I checked).
4550 *
4551 	 * Actually we don't care exactly when the network stack does its
4552 	 * put_page(); we just release our reference on these pages right here.
4553 */
4554 i = drbd_free_peer_reqs(device, &device->net_ee);
4555 if (i)
4556 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
4557 i = atomic_read(&device->pp_in_use_by_net);
4558 if (i)
4559 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
4560 i = atomic_read(&device->pp_in_use);
4561 if (i)
4562 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
4563
4564 D_ASSERT(list_empty(&device->read_ee));
4565 D_ASSERT(list_empty(&device->active_ee));
4566 D_ASSERT(list_empty(&device->sync_ee));
4567 D_ASSERT(list_empty(&device->done_ee));
4568
4569 return 0;
4570 }
4571
4572 /*
4573 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4574 * we can agree on is stored in agreed_pro_version.
4575 *
4576 * feature flags and the reserved array should be enough room for future
4577 * enhancements of the handshake protocol, and possible plugins...
4578 *
4579 * for now, they are expected to be zero, but ignored.
4580 */
4581 static int drbd_send_features(struct drbd_connection *connection)
4582 {
4583 struct drbd_socket *sock;
4584 struct p_connection_features *p;
4585
4586 sock = &connection->data;
4587 p = conn_prepare_command(connection, sock);
4588 if (!p)
4589 return -EIO;
4590 memset(p, 0, sizeof(*p));
4591 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4592 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4593 return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
4594 }
4595
4596 /*
4597 * return values:
4598 * 1 yes, we have a valid connection
4599 * 0 oops, did not work out, please try again
4600 * -1 peer talks different language,
4601 * no point in trying again, please go standalone.
4602 */
4603 static int drbd_do_features(struct drbd_connection *connection)
4604 {
4605 /* ASSERT current == connection->receiver ... */
4606 struct p_connection_features *p;
4607 const int expect = sizeof(struct p_connection_features);
4608 struct packet_info pi;
4609 int err;
4610
4611 err = drbd_send_features(connection);
4612 if (err)
4613 return 0;
4614
4615 err = drbd_recv_header(connection, &pi);
4616 if (err)
4617 return 0;
4618
4619 if (pi.cmd != P_CONNECTION_FEATURES) {
4620 conn_err(connection, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
4621 cmdname(pi.cmd), pi.cmd);
4622 return -1;
4623 }
4624
4625 if (pi.size != expect) {
4626 conn_err(connection, "expected ConnectionFeatures length: %u, received: %u\n",
4627 expect, pi.size);
4628 return -1;
4629 }
4630
4631 p = pi.data;
4632 err = drbd_recv_all_warn(connection, p, expect);
4633 if (err)
4634 return 0;
4635
4636 p->protocol_min = be32_to_cpu(p->protocol_min);
4637 p->protocol_max = be32_to_cpu(p->protocol_max);
4638 if (p->protocol_max == 0)
4639 p->protocol_max = p->protocol_min;
4640
4641 if (PRO_VERSION_MAX < p->protocol_min ||
4642 PRO_VERSION_MIN > p->protocol_max)
4643 goto incompat;
4644
4645 connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
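	/* Example with hypothetical numbers: if we support protocol versions
	 * 86..101 and the peer announces 86..97, the ranges overlap and we
	 * agree on min(101, 97) = 97. */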
4646
4647 conn_info(connection, "Handshake successful: "
4648 "Agreed network protocol version %d\n", connection->agreed_pro_version);
4649
4650 return 1;
4651
4652 incompat:
4653 conn_err(connection, "incompatible DRBD dialects: "
4654 "I support %d-%d, peer supports %d-%d\n",
4655 PRO_VERSION_MIN, PRO_VERSION_MAX,
4656 p->protocol_min, p->protocol_max);
4657 return -1;
4658 }
4659
4660 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4661 static int drbd_do_auth(struct drbd_connection *connection)
4662 {
4663 	conn_err(connection, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4664 conn_err(connection, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4665 return -1;
4666 }
4667 #else
4668 #define CHALLENGE_LEN 64
4669
4670 /* Return value:
4671 1 - auth succeeded,
4672 0 - failed, try again (network error),
4673 -1 - auth failed, don't try again.
4674 */
4675
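/* Rough shape of the exchange implemented below (both peers run the same
 * code): each side sends CHALLENGE_LEN random bytes in P_AUTH_CHALLENGE,
 * computes HMAC(shared_secret, peer's challenge) with the configured
 * cram-hmac-alg, and returns that digest in P_AUTH_RESPONSE.  Each side
 * then recomputes the digest it expects for its own challenge and compares
 * it against what the peer sent; a mismatch fails the handshake. */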
4676 static int drbd_do_auth(struct drbd_connection *connection)
4677 {
4678 struct drbd_socket *sock;
4679 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4680 struct scatterlist sg;
4681 char *response = NULL;
4682 char *right_response = NULL;
4683 char *peers_ch = NULL;
4684 unsigned int key_len;
4685 char secret[SHARED_SECRET_MAX]; /* 64 byte */
4686 unsigned int resp_size;
4687 struct hash_desc desc;
4688 struct packet_info pi;
4689 struct net_conf *nc;
4690 int err, rv;
4691
4692 /* FIXME: Put the challenge/response into the preallocated socket buffer. */
4693
4694 rcu_read_lock();
4695 nc = rcu_dereference(connection->net_conf);
4696 key_len = strlen(nc->shared_secret);
4697 memcpy(secret, nc->shared_secret, key_len);
4698 rcu_read_unlock();
4699
4700 desc.tfm = connection->cram_hmac_tfm;
4701 desc.flags = 0;
4702
4703 rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
4704 if (rv) {
4705 conn_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
4706 rv = -1;
4707 goto fail;
4708 }
4709
4710 get_random_bytes(my_challenge, CHALLENGE_LEN);
4711
4712 sock = &connection->data;
4713 if (!conn_prepare_command(connection, sock)) {
4714 rv = 0;
4715 goto fail;
4716 }
4717 rv = !conn_send_command(connection, sock, P_AUTH_CHALLENGE, 0,
4718 my_challenge, CHALLENGE_LEN);
4719 if (!rv)
4720 goto fail;
4721
4722 err = drbd_recv_header(connection, &pi);
4723 if (err) {
4724 rv = 0;
4725 goto fail;
4726 }
4727
4728 if (pi.cmd != P_AUTH_CHALLENGE) {
4729 conn_err(connection, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4730 cmdname(pi.cmd), pi.cmd);
4731 rv = 0;
4732 goto fail;
4733 }
4734
4735 if (pi.size > CHALLENGE_LEN * 2) {
4736 conn_err(connection, "expected AuthChallenge payload too big.\n");
4737 rv = -1;
4738 goto fail;
4739 }
4740
4741 peers_ch = kmalloc(pi.size, GFP_NOIO);
4742 if (peers_ch == NULL) {
4743 conn_err(connection, "kmalloc of peers_ch failed\n");
4744 rv = -1;
4745 goto fail;
4746 }
4747
4748 err = drbd_recv_all_warn(connection, peers_ch, pi.size);
4749 if (err) {
4750 rv = 0;
4751 goto fail;
4752 }
4753
4754 resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
4755 response = kmalloc(resp_size, GFP_NOIO);
4756 if (response == NULL) {
4757 conn_err(connection, "kmalloc of response failed\n");
4758 rv = -1;
4759 goto fail;
4760 }
4761
4762 sg_init_table(&sg, 1);
4763 sg_set_buf(&sg, peers_ch, pi.size);
4764
4765 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4766 if (rv) {
4767 conn_err(connection, "crypto_hash_digest() failed with %d\n", rv);
4768 rv = -1;
4769 goto fail;
4770 }
4771
4772 if (!conn_prepare_command(connection, sock)) {
4773 rv = 0;
4774 goto fail;
4775 }
4776 rv = !conn_send_command(connection, sock, P_AUTH_RESPONSE, 0,
4777 response, resp_size);
4778 if (!rv)
4779 goto fail;
4780
4781 err = drbd_recv_header(connection, &pi);
4782 if (err) {
4783 rv = 0;
4784 goto fail;
4785 }
4786
4787 if (pi.cmd != P_AUTH_RESPONSE) {
4788 conn_err(connection, "expected AuthResponse packet, received: %s (0x%04x)\n",
4789 cmdname(pi.cmd), pi.cmd);
4790 rv = 0;
4791 goto fail;
4792 }
4793
4794 if (pi.size != resp_size) {
4795 conn_err(connection, "expected AuthResponse payload of wrong size\n");
4796 rv = 0;
4797 goto fail;
4798 }
4799
4800 	err = drbd_recv_all_warn(connection, response, resp_size);
4801 if (err) {
4802 rv = 0;
4803 goto fail;
4804 }
4805
4806 right_response = kmalloc(resp_size, GFP_NOIO);
4807 if (right_response == NULL) {
4808 conn_err(connection, "kmalloc of right_response failed\n");
4809 rv = -1;
4810 goto fail;
4811 }
4812
4813 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4814
4815 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4816 if (rv) {
4817 conn_err(connection, "crypto_hash_digest() failed with %d\n", rv);
4818 rv = -1;
4819 goto fail;
4820 }
4821
4822 rv = !memcmp(response, right_response, resp_size);
4823
4824 if (rv)
4825 conn_info(connection, "Peer authenticated using %d bytes HMAC\n",
4826 resp_size);
4827 else
4828 rv = -1;
4829
4830 fail:
4831 kfree(peers_ch);
4832 kfree(response);
4833 kfree(right_response);
4834
4835 return rv;
4836 }
4837 #endif
4838
4839 int drbdd_init(struct drbd_thread *thi)
4840 {
4841 struct drbd_connection *connection = thi->connection;
4842 int h;
4843
4844 conn_info(connection, "receiver (re)started\n");
4845
4846 do {
4847 h = conn_connect(connection);
4848 if (h == 0) {
4849 conn_disconnect(connection);
4850 schedule_timeout_interruptible(HZ);
4851 }
4852 if (h == -1) {
4853 conn_warn(connection, "Discarding network configuration.\n");
4854 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
4855 }
4856 } while (h == 0);
4857
4858 if (h > 0)
4859 drbdd(connection);
4860
4861 conn_disconnect(connection);
4862
4863 conn_info(connection, "receiver terminated\n");
4864 return 0;
4865 }
4866
4867 /* ********* acknowledge sender ******** */
4868
4869 static int got_conn_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
4870 {
4871 struct p_req_state_reply *p = pi->data;
4872 int retcode = be32_to_cpu(p->retcode);
4873
4874 if (retcode >= SS_SUCCESS) {
4875 set_bit(CONN_WD_ST_CHG_OKAY, &connection->flags);
4876 } else {
4877 set_bit(CONN_WD_ST_CHG_FAIL, &connection->flags);
4878 conn_err(connection, "Requested state change failed by peer: %s (%d)\n",
4879 drbd_set_st_err_str(retcode), retcode);
4880 }
4881 wake_up(&connection->ping_wait);
4882
4883 return 0;
4884 }
4885
4886 static int got_RqSReply(struct drbd_connection *connection, struct packet_info *pi)
4887 {
4888 struct drbd_device *device;
4889 struct p_req_state_reply *p = pi->data;
4890 int retcode = be32_to_cpu(p->retcode);
4891
4892 device = vnr_to_device(connection, pi->vnr);
4893 if (!device)
4894 return -EIO;
4895
4896 if (test_bit(CONN_WD_ST_CHG_REQ, &connection->flags)) {
4897 D_ASSERT(connection->agreed_pro_version < 100);
4898 return got_conn_RqSReply(connection, pi);
4899 }
4900
4901 if (retcode >= SS_SUCCESS) {
4902 set_bit(CL_ST_CHG_SUCCESS, &device->flags);
4903 } else {
4904 set_bit(CL_ST_CHG_FAIL, &device->flags);
4905 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4906 drbd_set_st_err_str(retcode), retcode);
4907 }
4908 wake_up(&device->state_wait);
4909
4910 return 0;
4911 }
4912
4913 static int got_Ping(struct drbd_connection *connection, struct packet_info *pi)
4914 {
4915 return drbd_send_ping_ack(connection);
4916
4917 }
4918
4919 static int got_PingAck(struct drbd_connection *connection, struct packet_info *pi)
4920 {
4921 /* restore idle timeout */
4922 connection->meta.socket->sk->sk_rcvtimeo = connection->net_conf->ping_int*HZ;
4923 if (!test_and_set_bit(GOT_PING_ACK, &connection->flags))
4924 wake_up(&connection->ping_wait);
4925
4926 return 0;
4927 }
4928
4929 static int got_IsInSync(struct drbd_connection *connection, struct packet_info *pi)
4930 {
4931 struct drbd_device *device;
4932 struct p_block_ack *p = pi->data;
4933 sector_t sector = be64_to_cpu(p->sector);
4934 int blksize = be32_to_cpu(p->blksize);
4935
4936 device = vnr_to_device(connection, pi->vnr);
4937 if (!device)
4938 return -EIO;
4939
4940 D_ASSERT(first_peer_device(device)->connection->agreed_pro_version >= 89);
4941
4942 update_peer_seq(device, be32_to_cpu(p->seq_num));
4943
4944 if (get_ldev(device)) {
4945 drbd_rs_complete_io(device, sector);
4946 drbd_set_in_sync(device, sector, blksize);
4947 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4948 device->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4949 put_ldev(device);
4950 }
4951 dec_rs_pending(device);
4952 atomic_add(blksize >> 9, &device->rs_sect_in);
4953
4954 return 0;
4955 }
4956
4957 static int
4958 validate_req_change_req_state(struct drbd_device *device, u64 id, sector_t sector,
4959 struct rb_root *root, const char *func,
4960 enum drbd_req_event what, bool missing_ok)
4961 {
4962 struct drbd_request *req;
4963 struct bio_and_error m;
4964
4965 spin_lock_irq(&first_peer_device(device)->connection->req_lock);
4966 req = find_request(device, root, id, sector, missing_ok, func);
4967 if (unlikely(!req)) {
4968 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
4969 return -EIO;
4970 }
4971 __req_mod(req, what, &m);
4972 spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
4973
4974 if (m.bio)
4975 complete_master_bio(device, &m);
4976 return 0;
4977 }
4978
4979 static int got_BlockAck(struct drbd_connection *connection, struct packet_info *pi)
4980 {
4981 struct drbd_device *device;
4982 struct p_block_ack *p = pi->data;
4983 sector_t sector = be64_to_cpu(p->sector);
4984 int blksize = be32_to_cpu(p->blksize);
4985 enum drbd_req_event what;
4986
4987 device = vnr_to_device(connection, pi->vnr);
4988 if (!device)
4989 return -EIO;
4990
4991 update_peer_seq(device, be32_to_cpu(p->seq_num));
4992
4993 if (p->block_id == ID_SYNCER) {
4994 drbd_set_in_sync(device, sector, blksize);
4995 dec_rs_pending(device);
4996 return 0;
4997 }
4998 switch (pi->cmd) {
4999 case P_RS_WRITE_ACK:
5000 what = WRITE_ACKED_BY_PEER_AND_SIS;
5001 break;
5002 case P_WRITE_ACK:
5003 what = WRITE_ACKED_BY_PEER;
5004 break;
5005 case P_RECV_ACK:
5006 what = RECV_ACKED_BY_PEER;
5007 break;
5008 case P_SUPERSEDED:
5009 what = CONFLICT_RESOLVED;
5010 break;
5011 case P_RETRY_WRITE:
5012 what = POSTPONE_WRITE;
5013 break;
5014 default:
5015 BUG();
5016 }
5017
5018 return validate_req_change_req_state(device, p->block_id, sector,
5019 &device->write_requests, __func__,
5020 what, false);
5021 }
5022
5023 static int got_NegAck(struct drbd_connection *connection, struct packet_info *pi)
5024 {
5025 struct drbd_device *device;
5026 struct p_block_ack *p = pi->data;
5027 sector_t sector = be64_to_cpu(p->sector);
5028 int size = be32_to_cpu(p->blksize);
5029 int err;
5030
5031 device = vnr_to_device(connection, pi->vnr);
5032 if (!device)
5033 return -EIO;
5034
5035 update_peer_seq(device, be32_to_cpu(p->seq_num));
5036
5037 if (p->block_id == ID_SYNCER) {
5038 dec_rs_pending(device);
5039 drbd_rs_failed_io(device, sector, size);
5040 return 0;
5041 }
5042
5043 err = validate_req_change_req_state(device, p->block_id, sector,
5044 &device->write_requests, __func__,
5045 NEG_ACKED, true);
5046 if (err) {
5047 /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5048 The master bio might already be completed, therefore the
5049 request is no longer in the collision hash. */
5050 /* In Protocol B we might already have got a P_RECV_ACK
5051 but then get a P_NEG_ACK afterwards. */
5052 drbd_set_out_of_sync(device, sector, size);
5053 }
5054 return 0;
5055 }
5056
5057 static int got_NegDReply(struct drbd_connection *connection, struct packet_info *pi)
5058 {
5059 struct drbd_device *device;
5060 struct p_block_ack *p = pi->data;
5061 sector_t sector = be64_to_cpu(p->sector);
5062
5063 device = vnr_to_device(connection, pi->vnr);
5064 if (!device)
5065 return -EIO;
5066
5067 update_peer_seq(device, be32_to_cpu(p->seq_num));
5068
5069 dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
5070 (unsigned long long)sector, be32_to_cpu(p->blksize));
5071
5072 return validate_req_change_req_state(device, p->block_id, sector,
5073 &device->read_requests, __func__,
5074 NEG_ACKED, false);
5075 }
5076
5077 static int got_NegRSDReply(struct drbd_connection *connection, struct packet_info *pi)
5078 {
5079 struct drbd_device *device;
5080 sector_t sector;
5081 int size;
5082 struct p_block_ack *p = pi->data;
5083
5084 device = vnr_to_device(connection, pi->vnr);
5085 if (!device)
5086 return -EIO;
5087
5088 sector = be64_to_cpu(p->sector);
5089 size = be32_to_cpu(p->blksize);
5090
5091 update_peer_seq(device, be32_to_cpu(p->seq_num));
5092
5093 dec_rs_pending(device);
5094
5095 if (get_ldev_if_state(device, D_FAILED)) {
5096 drbd_rs_complete_io(device, sector);
5097 switch (pi->cmd) {
5098 case P_NEG_RS_DREPLY:
5099 drbd_rs_failed_io(device, sector, size);
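			/* fall through: like P_RS_CANCEL, nothing further to do */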
5100 case P_RS_CANCEL:
5101 break;
5102 default:
5103 BUG();
5104 }
5105 put_ldev(device);
5106 }
5107
5108 return 0;
5109 }
5110
5111 static int got_BarrierAck(struct drbd_connection *connection, struct packet_info *pi)
5112 {
5113 struct p_barrier_ack *p = pi->data;
5114 struct drbd_device *device;
5115 int vnr;
5116
5117 tl_release(connection, p->barrier, be32_to_cpu(p->set_size));
5118
5119 rcu_read_lock();
5120 idr_for_each_entry(&connection->volumes, device, vnr) {
5121 if (device->state.conn == C_AHEAD &&
5122 atomic_read(&device->ap_in_flight) == 0 &&
5123 !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &device->flags)) {
5124 device->start_resync_timer.expires = jiffies + HZ;
5125 add_timer(&device->start_resync_timer);
5126 }
5127 }
5128 rcu_read_unlock();
5129
5130 return 0;
5131 }
5132
5133 static int got_OVResult(struct drbd_connection *connection, struct packet_info *pi)
5134 {
5135 struct drbd_device *device;
5136 struct p_block_ack *p = pi->data;
5137 struct drbd_work *w;
5138 sector_t sector;
5139 int size;
5140
5141 device = vnr_to_device(connection, pi->vnr);
5142 if (!device)
5143 return -EIO;
5144
5145 sector = be64_to_cpu(p->sector);
5146 size = be32_to_cpu(p->blksize);
5147
5148 update_peer_seq(device, be32_to_cpu(p->seq_num));
5149
5150 if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
5151 drbd_ov_out_of_sync_found(device, sector, size);
5152 else
5153 ov_out_of_sync_print(device);
5154
5155 if (!get_ldev(device))
5156 return 0;
5157
5158 drbd_rs_complete_io(device, sector);
5159 dec_rs_pending(device);
5160
5161 --device->ov_left;
5162
5163 /* let's advance progress step marks only for every other megabyte */
5164 if ((device->ov_left & 0x200) == 0x200)
5165 drbd_advance_rs_marks(device, device->ov_left);
5166
5167 if (device->ov_left == 0) {
5168 w = kmalloc(sizeof(*w), GFP_NOIO);
5169 if (w) {
5170 w->cb = w_ov_finished;
5171 w->device = device;
5172 drbd_queue_work(&first_peer_device(device)->connection->sender_work, w);
5173 } else {
5174 dev_err(DEV, "kmalloc(w) failed.");
5175 ov_out_of_sync_print(device);
5176 drbd_resync_finished(device);
5177 }
5178 }
5179 put_ldev(device);
5180 return 0;
5181 }
5182
5183 static int got_skip(struct drbd_connection *connection, struct packet_info *pi)
5184 {
5185 return 0;
5186 }
5187
5188 static int connection_finish_peer_reqs(struct drbd_connection *connection)
5189 {
5190 struct drbd_device *device;
5191 int vnr, not_empty = 0;
5192
5193 do {
5194 clear_bit(SIGNAL_ASENDER, &connection->flags);
5195 flush_signals(current);
5196
5197 rcu_read_lock();
5198 idr_for_each_entry(&connection->volumes, device, vnr) {
5199 kref_get(&device->kref);
5200 rcu_read_unlock();
5201 if (drbd_finish_peer_reqs(device)) {
5202 kref_put(&device->kref, &drbd_minor_destroy);
5203 return 1;
5204 }
5205 kref_put(&device->kref, &drbd_minor_destroy);
5206 rcu_read_lock();
5207 }
5208 set_bit(SIGNAL_ASENDER, &connection->flags);
5209
5210 spin_lock_irq(&connection->req_lock);
5211 idr_for_each_entry(&connection->volumes, device, vnr) {
5212 not_empty = !list_empty(&device->done_ee);
5213 if (not_empty)
5214 break;
5215 }
5216 spin_unlock_irq(&connection->req_lock);
5217 rcu_read_unlock();
5218 } while (not_empty);
5219
5220 return 0;
5221 }
5222
5223 struct asender_cmd {
5224 size_t pkt_size;
5225 int (*fn)(struct drbd_connection *connection, struct packet_info *);
5226 };
5227
5228 static struct asender_cmd asender_tbl[] = {
5229 [P_PING] = { 0, got_Ping },
5230 [P_PING_ACK] = { 0, got_PingAck },
5231 [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5232 [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5233 [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
5234 [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck },
5235 [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
5236 [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
5237 [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply },
5238 [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
5239 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
5240 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5241 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
5242 [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
5243 [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply },
5244 	[P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
5245 [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck },
5246 };
5247
5248 int drbd_asender(struct drbd_thread *thi)
5249 {
5250 struct drbd_connection *connection = thi->connection;
5251 struct asender_cmd *cmd = NULL;
5252 struct packet_info pi;
5253 int rv;
5254 void *buf = connection->meta.rbuf;
5255 int received = 0;
5256 unsigned int header_size = drbd_header_size(connection);
5257 int expect = header_size;
5258 bool ping_timeout_active = false;
5259 struct net_conf *nc;
5260 int ping_timeo, tcp_cork, ping_int;
5261 struct sched_param param = { .sched_priority = 2 };
5262
5263 rv = sched_setscheduler(current, SCHED_RR, &param);
5264 if (rv < 0)
5265 conn_err(connection, "drbd_asender: ERROR set priority, ret=%d\n", rv);
5266
5267 while (get_t_state(thi) == RUNNING) {
5268 drbd_thread_current_set_cpu(thi);
5269
5270 rcu_read_lock();
5271 nc = rcu_dereference(connection->net_conf);
5272 ping_timeo = nc->ping_timeo;
5273 tcp_cork = nc->tcp_cork;
5274 ping_int = nc->ping_int;
5275 rcu_read_unlock();
5276
5277 if (test_and_clear_bit(SEND_PING, &connection->flags)) {
5278 if (drbd_send_ping(connection)) {
5279 conn_err(connection, "drbd_send_ping has failed\n");
5280 goto reconnect;
5281 }
5282 connection->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5283 ping_timeout_active = true;
5284 }
5285
5286 /* TODO: conditionally cork; it may hurt latency if we cork without
5287 much to send */
5288 if (tcp_cork)
5289 drbd_tcp_cork(connection->meta.socket);
5290 if (connection_finish_peer_reqs(connection)) {
5291 conn_err(connection, "connection_finish_peer_reqs() failed\n");
5292 goto reconnect;
5293 }
5294 /* but unconditionally uncork unless disabled */
5295 if (tcp_cork)
5296 drbd_tcp_uncork(connection->meta.socket);
5297
5298 /* short circuit, recv_msg would return EINTR anyways. */
5299 if (signal_pending(current))
5300 continue;
5301
5302 rv = drbd_recv_short(connection->meta.socket, buf, expect-received, 0);
5303 clear_bit(SIGNAL_ASENDER, &connection->flags);
5304
5305 flush_signals(current);
5306
5307 /* Note:
5308 * -EINTR (on meta) we got a signal
5309 * -EAGAIN (on meta) rcvtimeo expired
5310 * -ECONNRESET other side closed the connection
5311 * -ERESTARTSYS (on data) we got a signal
5312 * rv < 0 other than above: unexpected error!
5313 * rv == expected: full header or command
5314 * rv < expected: "woken" by signal during receive
5315 * rv == 0 : "connection shut down by peer"
5316 */
5317 if (likely(rv > 0)) {
5318 received += rv;
5319 buf += rv;
5320 } else if (rv == 0) {
5321 if (test_bit(DISCONNECT_SENT, &connection->flags)) {
5322 long t;
5323 rcu_read_lock();
5324 t = rcu_dereference(connection->net_conf)->ping_timeo * HZ/10;
5325 rcu_read_unlock();
5326
5327 t = wait_event_timeout(connection->ping_wait,
5328 connection->cstate < C_WF_REPORT_PARAMS,
5329 t);
5330 if (t)
5331 break;
5332 }
5333 conn_err(connection, "meta connection shut down by peer.\n");
5334 goto reconnect;
5335 } else if (rv == -EAGAIN) {
5336 /* If the data socket received something meanwhile,
5337 * that is good enough: peer is still alive. */
5338 if (time_after(connection->last_received,
5339 jiffies - connection->meta.socket->sk->sk_rcvtimeo))
5340 continue;
5341 if (ping_timeout_active) {
5342 conn_err(connection, "PingAck did not arrive in time.\n");
5343 goto reconnect;
5344 }
5345 set_bit(SEND_PING, &connection->flags);
5346 continue;
5347 } else if (rv == -EINTR) {
5348 continue;
5349 } else {
5350 conn_err(connection, "sock_recvmsg returned %d\n", rv);
5351 goto reconnect;
5352 }
5353
5354 if (received == expect && cmd == NULL) {
5355 if (decode_header(connection, connection->meta.rbuf, &pi))
5356 goto reconnect;
5357 cmd = &asender_tbl[pi.cmd];
5358 if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
5359 conn_err(connection, "Unexpected meta packet %s (0x%04x)\n",
5360 cmdname(pi.cmd), pi.cmd);
5361 goto disconnect;
5362 }
5363 expect = header_size + cmd->pkt_size;
5364 if (pi.size != expect - header_size) {
5365 conn_err(connection, "Wrong packet size on meta (c: %d, l: %d)\n",
5366 pi.cmd, pi.size);
5367 goto reconnect;
5368 }
5369 }
5370 if (received == expect) {
5371 bool err;
5372
5373 err = cmd->fn(connection, &pi);
5374 if (err) {
5375 conn_err(connection, "%pf failed\n", cmd->fn);
5376 goto reconnect;
5377 }
5378
5379 connection->last_received = jiffies;
5380
5381 if (cmd == &asender_tbl[P_PING_ACK]) {
5382 /* restore idle timeout */
5383 connection->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5384 ping_timeout_active = false;
5385 }
5386
5387 buf = connection->meta.rbuf;
5388 received = 0;
5389 expect = header_size;
5390 cmd = NULL;
5391 }
5392 }
5393
5394 if (0) {
5395 reconnect:
5396 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5397 conn_md_sync(connection);
5398 }
5399 if (0) {
5400 disconnect:
5401 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
5402 }
5403 clear_bit(SIGNAL_ASENDER, &connection->flags);
5404
5405 conn_info(connection, "asender terminated\n");
5406
5407 return 0;
5408 }