drbd: move put_ldev from __req_mod() to the endio callback
[deliverable/linux.git] / drivers / block / drbd / drbd_main.c
1/*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
29#include <linux/module.h>
30#include <linux/drbd.h>
31#include <asm/uaccess.h>
32#include <asm/types.h>
33#include <net/sock.h>
34#include <linux/ctype.h>
35#include <linux/mutex.h>
36#include <linux/fs.h>
37#include <linux/file.h>
38#include <linux/proc_fs.h>
39#include <linux/init.h>
40#include <linux/mm.h>
41#include <linux/memcontrol.h>
42#include <linux/mm_inline.h>
43#include <linux/slab.h>
44#include <linux/random.h>
45#include <linux/reboot.h>
46#include <linux/notifier.h>
47#include <linux/kthread.h>
48
49#define __KERNEL_SYSCALLS__
50#include <linux/unistd.h>
51#include <linux/vmalloc.h>
52
53#include <linux/drbd_limits.h>
54#include "drbd_int.h"
55#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57#include "drbd_vli.h"
58
59struct after_state_chg_work {
60 struct drbd_work w;
61 union drbd_state os;
62 union drbd_state ns;
63 enum chg_state_flags flags;
64 struct completion *done;
65};
66
67static DEFINE_MUTEX(drbd_main_mutex);
68int drbdd_init(struct drbd_thread *);
69int drbd_worker(struct drbd_thread *);
70int drbd_asender(struct drbd_thread *);
71
72int drbd_init(void);
73static int drbd_open(struct block_device *bdev, fmode_t mode);
74static int drbd_release(struct gendisk *gd, fmode_t mode);
75static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77 union drbd_state ns, enum chg_state_flags flags);
78static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>");
85MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86MODULE_VERSION(REL_VERSION);
87MODULE_LICENSE("GPL");
88MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92#include <linux/moduleparam.h>
93/* allow_open_on_secondary */
94MODULE_PARM_DESC(allow_oos, "DONT USE!");
95/* thanks to these macros, if compiled into the kernel (not-module),
96 * this becomes the boot parameter drbd.minor_count */
97module_param(minor_count, uint, 0444);
98module_param(disable_sendpage, bool, 0644);
99module_param(allow_oos, bool, 0);
100module_param(cn_idx, uint, 0444);
101module_param(proc_details, int, 0644);
102
103#ifdef CONFIG_DRBD_FAULT_INJECTION
104int enable_faults;
105int fault_rate;
106static int fault_count;
107int fault_devs;
108/* bitmap of enabled faults */
109module_param(enable_faults, int, 0664);
110/* fault rate % value - applies to all enabled faults */
111module_param(fault_rate, int, 0664);
112/* count of faults inserted */
113module_param(fault_count, int, 0664);
114/* bitmap of devices to insert faults on */
115module_param(fault_devs, int, 0644);
116#endif
117
118/* module parameter, defined */
119unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120bool disable_sendpage;
121bool allow_oos;
122unsigned int cn_idx = CN_IDX_DRBD;
123int proc_details; /* Detail level in proc drbd*/
124
125/* Module parameter for setting the user mode helper program
126 * to run. Default is /sbin/drbdadm */
127char usermode_helper[80] = "/sbin/drbdadm";
128
129module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131/* in 2.6.x, our device mapping and config info contains our virtual gendisks
132 * as member "struct gendisk *vdisk;"
133 */
134struct drbd_conf **minor_table;
135
136struct kmem_cache *drbd_request_cache;
137struct kmem_cache *drbd_ee_cache; /* epoch entries */
138struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
139struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
140mempool_t *drbd_request_mempool;
141mempool_t *drbd_ee_mempool;
142
143/* I do not use a standard mempool, because:
144 1) I want to hand out the pre-allocated objects first.
145 2) I want to be able to interrupt sleeping allocation with a signal.
146 Note: This is a single linked list, the next pointer is the private
147 member of struct page.
148 */
149struct page *drbd_pp_pool;
150spinlock_t drbd_pp_lock;
151int drbd_pp_vacant;
152wait_queue_head_t drbd_pp_wait;
153
154DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
156static const struct block_device_operations drbd_ops = {
157 .owner = THIS_MODULE,
158 .open = drbd_open,
159 .release = drbd_release,
160};
161
162#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164#ifdef __CHECKER__
165/* When checking with sparse, and this is an inline function, sparse will
166 give tons of false positives. When this is a real function, sparse works.
167 */
168int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169{
170 int io_allowed;
171
172 atomic_inc(&mdev->local_cnt);
173 io_allowed = (mdev->state.disk >= mins);
174 if (!io_allowed) {
175 if (atomic_dec_and_test(&mdev->local_cnt))
176 wake_up(&mdev->misc_wait);
177 }
178 return io_allowed;
179}
180
181#endif
182
183/**
184 * DOC: The transfer log
185 *
186 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
187 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188 * of the list. There is always at least one &struct drbd_tl_epoch object.
189 *
190 * Each &struct drbd_tl_epoch has a circular double linked list of requests
191 * attached.
192 */
193static int tl_init(struct drbd_conf *mdev)
194{
195 struct drbd_tl_epoch *b;
196
197 /* during device minor initialization, we may well use GFP_KERNEL */
198 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199 if (!b)
200 return 0;
201 INIT_LIST_HEAD(&b->requests);
202 INIT_LIST_HEAD(&b->w.list);
203 b->next = NULL;
204 b->br_number = 4711;
205 b->n_writes = 0;
206 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208 mdev->oldest_tle = b;
209 mdev->newest_tle = b;
210 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211 INIT_LIST_HEAD(&mdev->barrier_acked_requests);
212
213 mdev->tl_hash = NULL;
214 mdev->tl_hash_s = 0;
215
216 return 1;
217}
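/* Editor's annotation (not part of the original file): a minimal sketch of
 * what one transfer-log epoch carries, using hypothetical names that only
 * mirror struct drbd_tl_epoch conceptually:
 *
 *	struct tl_epoch_sketch {
 *		struct tl_epoch_sketch *next;	// oldest_tle -> ... -> newest_tle
 *		struct list_head requests;	// writes issued during this epoch
 *		unsigned int br_number;		// barrier number sent to the peer
 *		unsigned int n_writes;		// expected set_size of the barrier ack
 *	};
 *
 * tl_init() above allocates exactly one such object, so oldest_tle and
 * newest_tle are never NULL and tl_release()/_tl_restart() can always walk
 * the chain via ->next.
 */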
218
219static void tl_cleanup(struct drbd_conf *mdev)
220{
221 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
222 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
223 kfree(mdev->oldest_tle);
224 mdev->oldest_tle = NULL;
225 kfree(mdev->unused_spare_tle);
226 mdev->unused_spare_tle = NULL;
227 kfree(mdev->tl_hash);
228 mdev->tl_hash = NULL;
229 mdev->tl_hash_s = 0;
230}
231
232/**
233 * _tl_add_barrier() - Adds a barrier to the transfer log
234 * @mdev: DRBD device.
235 * @new: Barrier to be added before the current head of the TL.
236 *
237 * The caller must hold the req_lock.
238 */
239void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
240{
241 struct drbd_tl_epoch *newest_before;
242
243 INIT_LIST_HEAD(&new->requests);
244 INIT_LIST_HEAD(&new->w.list);
245 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
246 new->next = NULL;
247 new->n_writes = 0;
248
249 newest_before = mdev->newest_tle;
250 new->br_number = newest_before->br_number+1;
251 if (mdev->newest_tle != new) {
252 mdev->newest_tle->next = new;
253 mdev->newest_tle = new;
254 }
255}
256
257/**
258 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
259 * @mdev: DRBD device.
260 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
261 * @set_size: Expected number of requests before that barrier.
262 *
263 * In case the passed barrier_nr or set_size does not match the oldest
264 * &struct drbd_tl_epoch objects this function will cause a termination
265 * of the connection.
266 */
267void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
268 unsigned int set_size)
269{
270 struct drbd_tl_epoch *b, *nob; /* next old barrier */
271 struct list_head *le, *tle;
272 struct drbd_request *r;
273
274 spin_lock_irq(&mdev->req_lock);
275
276 b = mdev->oldest_tle;
277
278 /* first some paranoia code */
279 if (b == NULL) {
280 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
281 barrier_nr);
282 goto bail;
283 }
284 if (b->br_number != barrier_nr) {
285 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
286 barrier_nr, b->br_number);
287 goto bail;
288 }
289 if (b->n_writes != set_size) {
290 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
291 barrier_nr, set_size, b->n_writes);
292 goto bail;
293 }
294
295 /* Clean up list of requests processed during current epoch */
296 list_for_each_safe(le, tle, &b->requests) {
297 r = list_entry(le, struct drbd_request, tl_requests);
298 _req_mod(r, barrier_acked);
299 }
300 /* There could be requests on the list waiting for completion
301 of the write to the local disk. To avoid corruptions of
302 slab's data structures we have to remove the list's head.
303
304 Also there could have been a barrier ack out of sequence, overtaking
305 the write acks - which would be a bug and violating write ordering.
306 To not deadlock in case we lose connection while such requests are
307 still pending, we need some way to find them for the
308 _req_mod(connection_lost_while_pending).
309
310 These have been list_move'd to the out_of_sequence_requests list in
311 _req_mod(, barrier_acked) above.
312 */
313 list_splice_init(&b->requests, &mdev->barrier_acked_requests);
314
315 nob = b->next;
316 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
317 _tl_add_barrier(mdev, b);
318 if (nob)
319 mdev->oldest_tle = nob;
320 /* if nob == NULL b was the only barrier, and becomes the new
321 barrier. Therefore mdev->oldest_tle points already to b */
322 } else {
323 D_ASSERT(nob != NULL);
324 mdev->oldest_tle = nob;
325 kfree(b);
326 }
327
328 spin_unlock_irq(&mdev->req_lock);
329 dec_ap_pending(mdev);
330
331 return;
332
333bail:
334 spin_unlock_irq(&mdev->req_lock);
335 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
336}
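/* Editor's annotation (not part of the original file): rough flow of
 * tl_release() when a P_BARRIER_ACK arrives, as implemented above:
 *
 *	b = mdev->oldest_tle;
 *	check b->br_number == barrier_nr and b->n_writes == set_size;
 *	_req_mod(r, barrier_acked) for every request still on b->requests;
 *	move leftovers to barrier_acked_requests, then recycle or free b;
 *
 * any mismatch takes the bail: path and forces C_PROTOCOL_ERROR, i.e. the
 * connection is torn down rather than risking inconsistent write ordering.
 */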
337
338
339/**
340 * _tl_restart() - Walks the transfer log, and applies an action to all requests
341 * @mdev: DRBD device.
342 * @what: The action/event to perform with all request objects
343 *
344 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
345 * restart_frozen_disk_io.
346 */
347static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
348{
349 struct drbd_tl_epoch *b, *tmp, **pn;
350 struct list_head *le, *tle, carry_reads;
351 struct drbd_request *req;
352 int rv, n_writes, n_reads;
353
354 b = mdev->oldest_tle;
355 pn = &mdev->oldest_tle;
356 while (b) {
357 n_writes = 0;
358 n_reads = 0;
359 INIT_LIST_HEAD(&carry_reads);
360 list_for_each_safe(le, tle, &b->requests) {
361 req = list_entry(le, struct drbd_request, tl_requests);
362 rv = _req_mod(req, what);
363
364 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
365 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
366 }
367 tmp = b->next;
368
369 if (n_writes) {
370 if (what == resend) {
371 b->n_writes = n_writes;
372 if (b->w.cb == NULL) {
373 b->w.cb = w_send_barrier;
374 inc_ap_pending(mdev);
375 set_bit(CREATE_BARRIER, &mdev->flags);
376 }
377
378 drbd_queue_work(&mdev->data.work, &b->w);
379 }
380 pn = &b->next;
381 } else {
382 if (n_reads)
383 list_add(&carry_reads, &b->requests);
384 /* there could still be requests on that ring list,
385 * in case local io is still pending */
386 list_del(&b->requests);
387
388 /* dec_ap_pending corresponding to queue_barrier.
389 * the newest barrier may not have been queued yet,
390 * in which case w.cb is still NULL. */
391 if (b->w.cb != NULL)
392 dec_ap_pending(mdev);
393
394 if (b == mdev->newest_tle) {
395 /* recycle, but reinit! */
396 D_ASSERT(tmp == NULL);
397 INIT_LIST_HEAD(&b->requests);
398 list_splice(&carry_reads, &b->requests);
399 INIT_LIST_HEAD(&b->w.list);
400 b->w.cb = NULL;
401 b->br_number = net_random();
402 b->n_writes = 0;
403
404 *pn = b;
405 break;
406 }
407 *pn = tmp;
408 kfree(b);
409 }
410 b = tmp;
411 list_splice(&carry_reads, &b->requests);
412 }
413
414 /* Actions operating on the disk state, also want to work on
415 requests that got barrier acked. */
416 switch (what) {
417 case fail_frozen_disk_io:
418 case restart_frozen_disk_io:
419 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
420 req = list_entry(le, struct drbd_request, tl_requests);
421 _req_mod(req, what);
422 }
423
424 case connection_lost_while_pending:
425 case resend:
426 break;
427 default:
428 dev_err(DEV, "what = %d in _tl_restart()\n", what);
429 }
430}
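/* Editor's annotation (not part of the original file): _req_mod() reports in
 * its return value whether the request it touched was a read and/or a write
 * (MR_READ/MR_WRITE bits); _tl_restart() sums those per epoch so it can
 * decide whether the epoch still needs its barrier (re)sent (writes present)
 * or can be collapsed, carrying any remaining reads over into the next epoch
 * via the carry_reads list.
 */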
431
432
433/**
434 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
435 * @mdev: DRBD device.
436 *
437 * This is called after the connection to the peer was lost. The storage covered
438 * by the requests on the transfer log gets marked as out of sync. Called from the
439 * receiver thread and the worker thread.
440 */
441void tl_clear(struct drbd_conf *mdev)
442{
443 struct list_head *le, *tle;
444 struct drbd_request *r;
445
446 spin_lock_irq(&mdev->req_lock);
447
448 _tl_restart(mdev, connection_lost_while_pending);
449
450 /* we expect this list to be empty. */
451 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
452
453 /* but just in case, clean it up anyways! */
454 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
455 r = list_entry(le, struct drbd_request, tl_requests);
456 /* It would be nice to complete outside of spinlock.
457 * But this is easier for now. */
458 _req_mod(r, connection_lost_while_pending);
459 }
460
461 /* ensure bit indicating barrier is required is clear */
462 clear_bit(CREATE_BARRIER, &mdev->flags);
463
464 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
465
466 spin_unlock_irq(&mdev->req_lock);
467}
468
469void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
470{
471 spin_lock_irq(&mdev->req_lock);
472 _tl_restart(mdev, what);
473 spin_unlock_irq(&mdev->req_lock);
474}
475
476/**
477 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
478 * @mdev: DRBD device.
479 */
480void tl_abort_disk_io(struct drbd_conf *mdev)
481{
482 struct drbd_tl_epoch *b;
483 struct list_head *le, *tle;
484 struct drbd_request *req;
485
486 spin_lock_irq(&mdev->req_lock);
487 b = mdev->oldest_tle;
488 while (b) {
489 list_for_each_safe(le, tle, &b->requests) {
490 req = list_entry(le, struct drbd_request, tl_requests);
491 if (!(req->rq_state & RQ_LOCAL_PENDING))
492 continue;
493 _req_mod(req, abort_disk_io);
494 }
495 b = b->next;
496 }
497
498 list_for_each_safe(le, tle, &mdev->barrier_acked_requests) {
499 req = list_entry(le, struct drbd_request, tl_requests);
500 if (!(req->rq_state & RQ_LOCAL_PENDING))
501 continue;
502 _req_mod(req, abort_disk_io);
503 }
504
505 spin_unlock_irq(&mdev->req_lock);
506}
507
508/**
509 * cl_wide_st_chg() - true if the state change is a cluster wide one
510 * @mdev: DRBD device.
511 * @os: old (current) state.
512 * @ns: new (wanted) state.
513 */
514static int cl_wide_st_chg(struct drbd_conf *mdev,
515 union drbd_state os, union drbd_state ns)
516{
517 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
518 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
519 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
520 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
521 (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
522 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
523 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
524}
525
526enum drbd_state_rv
527drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
528 union drbd_state mask, union drbd_state val)
529{
530 unsigned long flags;
531 union drbd_state os, ns;
532 enum drbd_state_rv rv;
533
534 spin_lock_irqsave(&mdev->req_lock, flags);
535 os = mdev->state;
536 ns.i = (os.i & ~mask.i) | val.i;
537 rv = _drbd_set_state(mdev, ns, f, NULL);
538 ns = mdev->state;
539 spin_unlock_irqrestore(&mdev->req_lock, flags);
540
541 return rv;
542}
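/* Editor's annotation (not part of the original file): the mask/val idiom
 * used above, ns.i = (os.i & ~mask.i) | val.i, overwrites exactly the state
 * fields selected by `mask` and leaves all others untouched.  A small,
 * self-contained user-space sketch (hypothetical field layout, for
 * illustration only, kept out of the build with #if 0):
 */
#if 0
#include <assert.h>

static unsigned int apply_change(unsigned int os, unsigned int mask,
				 unsigned int val)
{
	return (os & ~mask) | val;	/* same expression as in drbd_change_state() */
}

int main(void)
{
	/* pretend the low nibble encodes "role"; change only that field */
	unsigned int os = 0xabcd0002u, mask = 0x0000000fu, val = 0x00000001u;

	assert(apply_change(os, mask, val) == 0xabcd0001u);
	return 0;
}
#endif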
543
544/**
545 * drbd_force_state() - Impose a change which happens outside our control on our state
546 * @mdev: DRBD device.
547 * @mask: mask of state bits to change.
548 * @val: value of new state bits.
549 */
550void drbd_force_state(struct drbd_conf *mdev,
551 union drbd_state mask, union drbd_state val)
552{
553 drbd_change_state(mdev, CS_HARD, mask, val);
554}
555
556static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
557static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
558 union drbd_state,
559 union drbd_state);
560enum sanitize_state_warnings {
561 NO_WARNING,
562 ABORTED_ONLINE_VERIFY,
563 ABORTED_RESYNC,
564 CONNECTION_LOST_NEGOTIATING,
565 IMPLICITLY_UPGRADED_DISK,
566 IMPLICITLY_UPGRADED_PDSK,
567};
568static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
569 union drbd_state ns, enum sanitize_state_warnings *warn);
570int drbd_send_state_req(struct drbd_conf *,
571 union drbd_state, union drbd_state);
572
573static enum drbd_state_rv
574_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
575 union drbd_state val)
576{
577 union drbd_state os, ns;
578 unsigned long flags;
579 enum drbd_state_rv rv;
580
581 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
582 return SS_CW_SUCCESS;
583
584 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
585 return SS_CW_FAILED_BY_PEER;
586
587 rv = 0;
588 spin_lock_irqsave(&mdev->req_lock, flags);
589 os = mdev->state;
590 ns.i = (os.i & ~mask.i) | val.i;
591 ns = sanitize_state(mdev, os, ns, NULL);
592
593 if (!cl_wide_st_chg(mdev, os, ns))
594 rv = SS_CW_NO_NEED;
595 if (!rv) {
596 rv = is_valid_state(mdev, ns);
597 if (rv == SS_SUCCESS) {
598 rv = is_valid_state_transition(mdev, ns, os);
599 if (rv == SS_SUCCESS)
600 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
601 }
602 }
603 spin_unlock_irqrestore(&mdev->req_lock, flags);
604
605 return rv;
606}
607
608/**
609 * drbd_req_state() - Perform an eventually cluster wide state change
610 * @mdev: DRBD device.
611 * @mask: mask of state bits to change.
612 * @val: value of new state bits.
613 * @f: flags
614 *
615 * Should not be called directly, use drbd_request_state() or
616 * _drbd_request_state().
617 */
618static enum drbd_state_rv
619drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
620 union drbd_state val, enum chg_state_flags f)
621{
622 struct completion done;
623 unsigned long flags;
624 union drbd_state os, ns;
625 enum drbd_state_rv rv;
626
627 init_completion(&done);
628
629 if (f & CS_SERIALIZE)
630 mutex_lock(&mdev->state_mutex);
631
632 spin_lock_irqsave(&mdev->req_lock, flags);
633 os = mdev->state;
634 ns.i = (os.i & ~mask.i) | val.i;
635 ns = sanitize_state(mdev, os, ns, NULL);
636
637 if (cl_wide_st_chg(mdev, os, ns)) {
638 rv = is_valid_state(mdev, ns);
639 if (rv == SS_SUCCESS)
640 rv = is_valid_state_transition(mdev, ns, os);
641 spin_unlock_irqrestore(&mdev->req_lock, flags);
642
643 if (rv < SS_SUCCESS) {
644 if (f & CS_VERBOSE)
645 print_st_err(mdev, os, ns, rv);
646 goto abort;
647 }
648
649 drbd_state_lock(mdev);
650 if (!drbd_send_state_req(mdev, mask, val)) {
651 drbd_state_unlock(mdev);
652 rv = SS_CW_FAILED_BY_PEER;
653 if (f & CS_VERBOSE)
654 print_st_err(mdev, os, ns, rv);
655 goto abort;
656 }
657
658 wait_event(mdev->state_wait,
659 (rv = _req_st_cond(mdev, mask, val)));
660
661 if (rv < SS_SUCCESS) {
662 drbd_state_unlock(mdev);
663 if (f & CS_VERBOSE)
664 print_st_err(mdev, os, ns, rv);
665 goto abort;
666 }
667 spin_lock_irqsave(&mdev->req_lock, flags);
668 os = mdev->state;
669 ns.i = (os.i & ~mask.i) | val.i;
670 rv = _drbd_set_state(mdev, ns, f, &done);
671 drbd_state_unlock(mdev);
672 } else {
673 rv = _drbd_set_state(mdev, ns, f, &done);
674 }
675
676 spin_unlock_irqrestore(&mdev->req_lock, flags);
677
678 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
679 D_ASSERT(current != mdev->worker.task);
680 wait_for_completion(&done);
681 }
682
683abort:
684 if (f & CS_SERIALIZE)
685 mutex_unlock(&mdev->state_mutex);
686
687 return rv;
688}
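/* Editor's annotation (not part of the original file): for a cluster-wide
 * change, drbd_req_state() above (1) validates the transition locally,
 * (2) sends the mask/val pair to the peer via drbd_send_state_req(), and
 * (3) sleeps on state_wait until _req_st_cond() sees CL_ST_CHG_SUCCESS or
 * CL_ST_CHG_FAIL (presumably set when the peer's reply is processed) or
 * decides the change no longer needs cluster-wide agreement.  Only then is
 * _drbd_set_state() applied under req_lock.
 */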
689
690/**
691 * _drbd_request_state() - Request a state change (with flags)
692 * @mdev: DRBD device.
693 * @mask: mask of state bits to change.
694 * @val: value of new state bits.
695 * @f: flags
696 *
697 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
698 * flag, or when logging of failed state change requests is not desired.
699 */
700enum drbd_state_rv
701_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
702 union drbd_state val, enum chg_state_flags f)
703{
704 enum drbd_state_rv rv;
705
706 wait_event(mdev->state_wait,
707 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
708
709 return rv;
710}
711
712static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
713{
714 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
715 name,
716 drbd_conn_str(ns.conn),
717 drbd_role_str(ns.role),
718 drbd_role_str(ns.peer),
719 drbd_disk_str(ns.disk),
720 drbd_disk_str(ns.pdsk),
721 is_susp(ns) ? 's' : 'r',
722 ns.aftr_isp ? 'a' : '-',
723 ns.peer_isp ? 'p' : '-',
724 ns.user_isp ? 'u' : '-'
725 );
726}
727
728void print_st_err(struct drbd_conf *mdev, union drbd_state os,
729 union drbd_state ns, enum drbd_state_rv err)
730{
731 if (err == SS_IN_TRANSIENT_STATE)
732 return;
733 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
734 print_st(mdev, " state", os);
735 print_st(mdev, "wanted", ns);
736}
737
738
739/**
740 * is_valid_state() - Returns an SS_ error code if ns is not valid
741 * @mdev: DRBD device.
742 * @ns: State to consider.
743 */
744static enum drbd_state_rv
745is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
746{
747 /* See drbd_state_sw_errors in drbd_strings.c */
748
749 enum drbd_fencing_p fp;
750 enum drbd_state_rv rv = SS_SUCCESS;
751
752 fp = FP_DONT_CARE;
753 if (get_ldev(mdev)) {
754 fp = mdev->ldev->dc.fencing;
755 put_ldev(mdev);
756 }
757
758 if (get_net_conf(mdev)) {
759 if (!mdev->net_conf->two_primaries &&
760 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
761 rv = SS_TWO_PRIMARIES;
762 put_net_conf(mdev);
763 }
764
765 if (rv <= 0)
766 /* already found a reason to abort */;
767 else if (ns.role == R_SECONDARY && mdev->open_cnt)
768 rv = SS_DEVICE_IN_USE;
769
770 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
771 rv = SS_NO_UP_TO_DATE_DISK;
772
773 else if (fp >= FP_RESOURCE &&
774 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
775 rv = SS_PRIMARY_NOP;
776
777 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
778 rv = SS_NO_UP_TO_DATE_DISK;
779
780 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
781 rv = SS_NO_LOCAL_DISK;
782
783 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
784 rv = SS_NO_REMOTE_DISK;
785
786 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
787 rv = SS_NO_UP_TO_DATE_DISK;
788
789 else if ((ns.conn == C_CONNECTED ||
790 ns.conn == C_WF_BITMAP_S ||
791 ns.conn == C_SYNC_SOURCE ||
792 ns.conn == C_PAUSED_SYNC_S) &&
793 ns.disk == D_OUTDATED)
794 rv = SS_CONNECTED_OUTDATES;
795
796 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
797 (mdev->sync_conf.verify_alg[0] == 0))
798 rv = SS_NO_VERIFY_ALG;
799
800 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
801 mdev->agreed_pro_version < 88)
802 rv = SS_NOT_SUPPORTED;
803
804 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
805 rv = SS_CONNECTED_OUTDATES;
806
807 return rv;
808}
809
810/**
811 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
812 * @mdev: DRBD device.
813 * @ns: new state.
814 * @os: old state.
815 */
816static enum drbd_state_rv
817is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
818 union drbd_state os)
819{
820 enum drbd_state_rv rv = SS_SUCCESS;
821
822 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
823 os.conn > C_CONNECTED)
824 rv = SS_RESYNC_RUNNING;
825
826 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
827 rv = SS_ALREADY_STANDALONE;
828
829 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
830 rv = SS_IS_DISKLESS;
831
832 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
833 rv = SS_NO_NET_CONFIG;
834
835 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
836 rv = SS_LOWER_THAN_OUTDATED;
837
838 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
839 rv = SS_IN_TRANSIENT_STATE;
840
841 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
842 rv = SS_IN_TRANSIENT_STATE;
843
844 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
845 rv = SS_NEED_CONNECTION;
846
847 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
848 ns.conn != os.conn && os.conn > C_CONNECTED)
849 rv = SS_RESYNC_RUNNING;
850
851 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
852 os.conn < C_CONNECTED)
853 rv = SS_NEED_CONNECTION;
854
855 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
856 && os.conn < C_WF_REPORT_PARAMS)
857 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
858
859 return rv;
860}
861
862static void print_sanitize_warnings(struct drbd_conf *mdev, enum sanitize_state_warnings warn)
863{
864 static const char *msg_table[] = {
865 [NO_WARNING] = "",
866 [ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
867 [ABORTED_RESYNC] = "Resync aborted.",
868 [CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
869 [IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
870 [IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
871 };
872
873 if (warn != NO_WARNING)
874 dev_warn(DEV, "%s\n", msg_table[warn]);
875}
876
877/**
878 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
879 * @mdev: DRBD device.
880 * @os: old state.
881 * @ns: new state.
882 * @warn_sync_abort:
883 *
884 * When we lose connection, we have to set the state of the peer's disk (pdsk)
885 * to D_UNKNOWN. This rule and many more along those lines are in this function.
886 */
887static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
888 union drbd_state ns, enum sanitize_state_warnings *warn)
889{
890 enum drbd_fencing_p fp;
891 enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
892
893 if (warn)
894 *warn = NO_WARNING;
895
896 fp = FP_DONT_CARE;
897 if (get_ldev(mdev)) {
898 fp = mdev->ldev->dc.fencing;
899 put_ldev(mdev);
900 }
901
902 /* Disallow Network errors to configure a device's network part */
903 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
904 os.conn <= C_DISCONNECTING)
905 ns.conn = os.conn;
906
907 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
908 * If you try to go into some Sync* state, that shall fail (elsewhere). */
909 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
910 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_CONNECTED)
911 ns.conn = os.conn;
912
913 /* we cannot fail (again) if we already detached */
914 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
915 ns.disk = D_DISKLESS;
916
917 /* After C_DISCONNECTING only C_STANDALONE may follow */
918 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
919 ns.conn = os.conn;
920
921 if (ns.conn < C_CONNECTED) {
922 ns.peer_isp = 0;
923 ns.peer = R_UNKNOWN;
924 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
925 ns.pdsk = D_UNKNOWN;
926 }
927
928 /* Clear the aftr_isp when becoming unconfigured */
929 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
930 ns.aftr_isp = 0;
931
932 /* Abort resync if a disk fails/detaches */
933 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
934 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
935 if (warn)
936 *warn = os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
937 ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
938 ns.conn = C_CONNECTED;
939 }
940
941 /* Connection breaks down before we finished "Negotiating" */
942 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
943 get_ldev_if_state(mdev, D_NEGOTIATING)) {
944 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
945 ns.disk = mdev->new_state_tmp.disk;
946 ns.pdsk = mdev->new_state_tmp.pdsk;
947 } else {
948 if (warn)
949 *warn = CONNECTION_LOST_NEGOTIATING;
950 ns.disk = D_DISKLESS;
951 ns.pdsk = D_UNKNOWN;
952 }
953 put_ldev(mdev);
954 }
955
956 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
957 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
958 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
959 ns.disk = D_UP_TO_DATE;
960 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
961 ns.pdsk = D_UP_TO_DATE;
962 }
963
964 /* Implications of the connection stat on the disk states */
965 disk_min = D_DISKLESS;
966 disk_max = D_UP_TO_DATE;
967 pdsk_min = D_INCONSISTENT;
968 pdsk_max = D_UNKNOWN;
969 switch ((enum drbd_conns)ns.conn) {
970 case C_WF_BITMAP_T:
971 case C_PAUSED_SYNC_T:
972 case C_STARTING_SYNC_T:
973 case C_WF_SYNC_UUID:
974 case C_BEHIND:
975 disk_min = D_INCONSISTENT;
976 disk_max = D_OUTDATED;
977 pdsk_min = D_UP_TO_DATE;
978 pdsk_max = D_UP_TO_DATE;
979 break;
980 case C_VERIFY_S:
981 case C_VERIFY_T:
982 disk_min = D_UP_TO_DATE;
983 disk_max = D_UP_TO_DATE;
984 pdsk_min = D_UP_TO_DATE;
985 pdsk_max = D_UP_TO_DATE;
986 break;
987 case C_CONNECTED:
988 disk_min = D_DISKLESS;
989 disk_max = D_UP_TO_DATE;
990 pdsk_min = D_DISKLESS;
991 pdsk_max = D_UP_TO_DATE;
992 break;
993 case C_WF_BITMAP_S:
994 case C_PAUSED_SYNC_S:
995 case C_STARTING_SYNC_S:
996 case C_AHEAD:
997 disk_min = D_UP_TO_DATE;
998 disk_max = D_UP_TO_DATE;
999 pdsk_min = D_INCONSISTENT;
1000 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
1001 break;
1002 case C_SYNC_TARGET:
1003 disk_min = D_INCONSISTENT;
1004 disk_max = D_INCONSISTENT;
1005 pdsk_min = D_UP_TO_DATE;
1006 pdsk_max = D_UP_TO_DATE;
1007 break;
1008 case C_SYNC_SOURCE:
1009 disk_min = D_UP_TO_DATE;
1010 disk_max = D_UP_TO_DATE;
1011 pdsk_min = D_INCONSISTENT;
1012 pdsk_max = D_INCONSISTENT;
1013 break;
1014 case C_STANDALONE:
1015 case C_DISCONNECTING:
1016 case C_UNCONNECTED:
1017 case C_TIMEOUT:
1018 case C_BROKEN_PIPE:
1019 case C_NETWORK_FAILURE:
1020 case C_PROTOCOL_ERROR:
1021 case C_TEAR_DOWN:
1022 case C_WF_CONNECTION:
1023 case C_WF_REPORT_PARAMS:
1024 case C_MASK:
1025 break;
1026 }
1027 if (ns.disk > disk_max)
1028 ns.disk = disk_max;
1029
1030 if (ns.disk < disk_min) {
1031 if (warn)
1032 *warn = IMPLICITLY_UPGRADED_DISK;
1033 ns.disk = disk_min;
1034 }
1035 if (ns.pdsk > pdsk_max)
1036 ns.pdsk = pdsk_max;
1037
1038 if (ns.pdsk < pdsk_min) {
1039 if (warn)
1040 *warn = IMPLICITLY_UPGRADED_PDSK;
1041 ns.pdsk = pdsk_min;
1042 }
1043
1044 if (fp == FP_STONITH &&
1045 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
1046 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
1047 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
1048
1049 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
1050 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
1051 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
1052 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
1053
1054 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
1055 if (ns.conn == C_SYNC_SOURCE)
1056 ns.conn = C_PAUSED_SYNC_S;
1057 if (ns.conn == C_SYNC_TARGET)
1058 ns.conn = C_PAUSED_SYNC_T;
1059 } else {
1060 if (ns.conn == C_PAUSED_SYNC_S)
1061 ns.conn = C_SYNC_SOURCE;
1062 if (ns.conn == C_PAUSED_SYNC_T)
1063 ns.conn = C_SYNC_TARGET;
1064 }
1065
1066 return ns;
1067}
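/* Editor's annotation (not part of the original file): the switch above
 * derives, per connection state, an allowed window [disk_min, disk_max] and
 * [pdsk_min, pdsk_max]; ns.disk and ns.pdsk are then clamped into those
 * windows, and a clamp upwards is reported as IMPLICITLY_UPGRADED_DISK /
 * IMPLICITLY_UPGRADED_PDSK through the *warn out-parameter.
 */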
1068
1069/* helper for __drbd_set_state */
1070static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1071{
1072 if (mdev->agreed_pro_version < 90)
1073 mdev->ov_start_sector = 0;
1074 mdev->rs_total = drbd_bm_bits(mdev);
1075 mdev->ov_position = 0;
1076 if (cs == C_VERIFY_T) {
1077 /* starting online verify from an arbitrary position
1078 * does not fit well into the existing protocol.
1079 * on C_VERIFY_T, we initialize ov_left and friends
1080 * implicitly in receive_DataRequest once the
1081 * first P_OV_REQUEST is received */
1082 mdev->ov_start_sector = ~(sector_t)0;
1083 } else {
1084 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1085 if (bit >= mdev->rs_total) {
1086 mdev->ov_start_sector =
1087 BM_BIT_TO_SECT(mdev->rs_total - 1);
1088 mdev->rs_total = 1;
1089 } else
1090 mdev->rs_total -= bit;
1091 mdev->ov_position = mdev->ov_start_sector;
1092 }
1093 mdev->ov_left = mdev->rs_total;
1094}
1095
1096static void drbd_resume_al(struct drbd_conf *mdev)
1097{
1098 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1099 dev_info(DEV, "Resumed AL updates\n");
1100}
1101
1102/**
1103 * __drbd_set_state() - Set a new DRBD state
1104 * @mdev: DRBD device.
1105 * @ns: new state.
1106 * @flags: Flags
1107 * @done: Optional completion, that will get completed after the after_state_ch() finished
1108 *
1109 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1110 */
1111enum drbd_state_rv
1112__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1113 enum chg_state_flags flags, struct completion *done)
1114{
1115 union drbd_state os;
1116 enum drbd_state_rv rv = SS_SUCCESS;
1117 enum sanitize_state_warnings ssw;
1118 struct after_state_chg_work *ascw;
1119
1120 os = mdev->state;
1121
1122 ns = sanitize_state(mdev, os, ns, &ssw);
1123
1124 if (ns.i == os.i)
1125 return SS_NOTHING_TO_DO;
1126
1127 if (!(flags & CS_HARD)) {
1128 /* pre-state-change checks ; only look at ns */
1129 /* See drbd_state_sw_errors in drbd_strings.c */
1130
1131 rv = is_valid_state(mdev, ns);
1132 if (rv < SS_SUCCESS) {
1133 /* If the old state was illegal as well, then let
1134 this happen...*/
1135
1136 if (is_valid_state(mdev, os) == rv)
1137 rv = is_valid_state_transition(mdev, ns, os);
1138 } else
1139 rv = is_valid_state_transition(mdev, ns, os);
1140 }
1141
1142 if (rv < SS_SUCCESS) {
1143 if (flags & CS_VERBOSE)
1144 print_st_err(mdev, os, ns, rv);
1145 return rv;
1146 }
1147
1148 print_sanitize_warnings(mdev, ssw);
1149
1150 {
1151 char *pbp, pb[300];
1152 pbp = pb;
1153 *pbp = 0;
1154 if (ns.role != os.role)
1155 pbp += sprintf(pbp, "role( %s -> %s ) ",
1156 drbd_role_str(os.role),
1157 drbd_role_str(ns.role));
1158 if (ns.peer != os.peer)
1159 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1160 drbd_role_str(os.peer),
1161 drbd_role_str(ns.peer));
1162 if (ns.conn != os.conn)
1163 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1164 drbd_conn_str(os.conn),
1165 drbd_conn_str(ns.conn));
1166 if (ns.disk != os.disk)
1167 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1168 drbd_disk_str(os.disk),
1169 drbd_disk_str(ns.disk));
1170 if (ns.pdsk != os.pdsk)
1171 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1172 drbd_disk_str(os.pdsk),
1173 drbd_disk_str(ns.pdsk));
1174 if (is_susp(ns) != is_susp(os))
1175 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1176 is_susp(os),
1177 is_susp(ns));
1178 if (ns.aftr_isp != os.aftr_isp)
1179 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1180 os.aftr_isp,
1181 ns.aftr_isp);
1182 if (ns.peer_isp != os.peer_isp)
1183 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1184 os.peer_isp,
1185 ns.peer_isp);
1186 if (ns.user_isp != os.user_isp)
1187 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1188 os.user_isp,
1189 ns.user_isp);
1190 dev_info(DEV, "%s\n", pb);
1191 }
1192
1193 /* solve the race between becoming unconfigured,
1194 * worker doing the cleanup, and
1195 * admin reconfiguring us:
1196 * on (re)configure, first set CONFIG_PENDING,
1197 * then wait for a potentially exiting worker,
1198 * start the worker, and schedule one no_op.
1199 * then proceed with configuration.
1200 */
1201 if (ns.disk == D_DISKLESS &&
1202 ns.conn == C_STANDALONE &&
1203 ns.role == R_SECONDARY &&
1204 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1205 set_bit(DEVICE_DYING, &mdev->flags);
1206
1207 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1208 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1209 * drbd_ldev_destroy() won't happen before our corresponding
1210 * after_state_ch works run, where we put_ldev again. */
1211 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1212 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1213 atomic_inc(&mdev->local_cnt);
1214
1215 mdev->state = ns;
1216
1217 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1218 drbd_print_uuids(mdev, "attached to UUIDs");
1219
1220 wake_up(&mdev->misc_wait);
1221 wake_up(&mdev->state_wait);
1222
1223 /* aborted verify run. log the last position */
1224 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1225 ns.conn < C_CONNECTED) {
1226 mdev->ov_start_sector =
1227 BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1228 dev_info(DEV, "Online Verify reached sector %llu\n",
1229 (unsigned long long)mdev->ov_start_sector);
1230 }
1231
1232 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1233 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1234 dev_info(DEV, "Syncer continues.\n");
1235 mdev->rs_paused += (long)jiffies
1236 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1237 if (ns.conn == C_SYNC_TARGET)
1238 mod_timer(&mdev->resync_timer, jiffies);
1239 }
1240
1241 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1242 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1243 dev_info(DEV, "Resync suspended\n");
1244 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1245 }
1246
1247 if (os.conn == C_CONNECTED &&
1248 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1249 unsigned long now = jiffies;
1250 int i;
1251
1252 set_ov_position(mdev, ns.conn);
1253 mdev->rs_start = now;
1254 mdev->rs_last_events = 0;
1255 mdev->rs_last_sect_ev = 0;
1256 mdev->ov_last_oos_size = 0;
1257 mdev->ov_last_oos_start = 0;
1258
1259 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1260 mdev->rs_mark_left[i] = mdev->ov_left;
1261 mdev->rs_mark_time[i] = now;
1262 }
1263
1264 drbd_rs_controller_reset(mdev);
1265
1266 if (ns.conn == C_VERIFY_S) {
1267 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1268 (unsigned long long)mdev->ov_position);
1269 mod_timer(&mdev->resync_timer, jiffies);
1270 }
1271 }
1272
1273 if (get_ldev(mdev)) {
1274 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1275 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1276 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1277
1278 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1279 mdf |= MDF_CRASHED_PRIMARY;
1280 if (mdev->state.role == R_PRIMARY ||
1281 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1282 mdf |= MDF_PRIMARY_IND;
1283 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1284 mdf |= MDF_CONNECTED_IND;
1285 if (mdev->state.disk > D_INCONSISTENT)
1286 mdf |= MDF_CONSISTENT;
1287 if (mdev->state.disk > D_OUTDATED)
1288 mdf |= MDF_WAS_UP_TO_DATE;
1289 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1290 mdf |= MDF_PEER_OUT_DATED;
1291 if (mdf != mdev->ldev->md.flags) {
1292 mdev->ldev->md.flags = mdf;
1293 drbd_md_mark_dirty(mdev);
1294 }
1295 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1296 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1297 put_ldev(mdev);
1298 }
1299
1300 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1301 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1302 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1303 set_bit(CONSIDER_RESYNC, &mdev->flags);
1304
1305 /* Receiver should clean up itself */
1306 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1307 drbd_thread_stop_nowait(&mdev->receiver);
1308
1309 /* Now the receiver finished cleaning up itself, it should die */
1310 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1311 drbd_thread_stop_nowait(&mdev->receiver);
1312
1313 /* Upon network failure, we need to restart the receiver. */
1314 if (os.conn > C_WF_CONNECTION &&
1315 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1316 drbd_thread_restart_nowait(&mdev->receiver);
1317
1318 /* Resume AL writing if we get a connection */
1319 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1320 drbd_resume_al(mdev);
1321
1322 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1323 if (ascw) {
1324 ascw->os = os;
1325 ascw->ns = ns;
1326 ascw->flags = flags;
1327 ascw->w.cb = w_after_state_ch;
1328 ascw->done = done;
1329 drbd_queue_work(&mdev->data.work, &ascw->w);
1330 } else {
1331 dev_warn(DEV, "Could not kmalloc an ascw\n");
1332 }
1333
1334 return rv;
1335}
1336
1337static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1338{
1339 struct after_state_chg_work *ascw =
1340 container_of(w, struct after_state_chg_work, w);
1341 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1342 if (ascw->flags & CS_WAIT_COMPLETE) {
1343 D_ASSERT(ascw->done != NULL);
1344 complete(ascw->done);
1345 }
1346 kfree(ascw);
1347
1348 return 1;
1349}
1350
1351static void abw_start_sync(struct drbd_conf *mdev, int rv)
1352{
1353 if (rv) {
1354 dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
1355 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1356 return;
1357 }
1358
1359 switch (mdev->state.conn) {
1360 case C_STARTING_SYNC_T:
1361 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1362 break;
1363 case C_STARTING_SYNC_S:
1364 drbd_start_resync(mdev, C_SYNC_SOURCE);
1365 break;
1366 }
1367}
1368
1369int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
1370 int (*io_fn)(struct drbd_conf *),
1371 char *why, enum bm_flag flags)
1372{
1373 int rv;
1374
1375 D_ASSERT(current == mdev->worker.task);
1376
1377 /* open coded non-blocking drbd_suspend_io(mdev); */
1378 set_bit(SUSPEND_IO, &mdev->flags);
1379
1380 drbd_bm_lock(mdev, why, flags);
1381 rv = io_fn(mdev);
1382 drbd_bm_unlock(mdev);
1383
1384 drbd_resume_io(mdev);
1385
1386 return rv;
1387}
1388
1389/**
1390 * after_state_ch() - Perform after state change actions that may sleep
1391 * @mdev: DRBD device.
1392 * @os: old state.
1393 * @ns: new state.
1394 * @flags: Flags
1395 */
1396static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1397 union drbd_state ns, enum chg_state_flags flags)
1398{
1399 enum drbd_fencing_p fp;
1400 enum drbd_req_event what = nothing;
1401 union drbd_state nsm = (union drbd_state){ .i = -1 };
1402
1403 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1404 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1405 if (mdev->p_uuid)
1406 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1407 }
1408
1409 fp = FP_DONT_CARE;
1410 if (get_ldev(mdev)) {
1411 fp = mdev->ldev->dc.fencing;
1412 put_ldev(mdev);
1413 }
1414
1415 /* Inform userspace about the change... */
1416 drbd_bcast_state(mdev, ns);
1417
1418 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1419 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1420 drbd_khelper(mdev, "pri-on-incon-degr");
1421
1422 /* Here we have the actions that are performed after a
1423 state change. This function might sleep */
1424
1425 if (os.disk <= D_NEGOTIATING && ns.disk > D_NEGOTIATING)
1426 mod_timer(&mdev->request_timer, jiffies + HZ);
1427
1428 nsm.i = -1;
1429 if (ns.susp_nod) {
1430 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1431 what = resend;
1432
1433 if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
1434 ns.disk > D_NEGOTIATING)
1435 what = restart_frozen_disk_io;
1436
1437 if (what != nothing)
1438 nsm.susp_nod = 0;
1439 }
1440
1441 if (ns.susp_fen) {
1442 /* case1: The outdate peer handler is successful: */
1443 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1444 tl_clear(mdev);
1445 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1446 drbd_uuid_new_current(mdev);
1447 clear_bit(NEW_CUR_UUID, &mdev->flags);
1448 }
1449 spin_lock_irq(&mdev->req_lock);
1450 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1451 spin_unlock_irq(&mdev->req_lock);
1452 }
1453 /* case2: The connection was established again: */
1454 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1455 clear_bit(NEW_CUR_UUID, &mdev->flags);
1456 what = resend;
1457 nsm.susp_fen = 0;
1458 }
1459 }
1460
1461 if (what != nothing) {
1462 spin_lock_irq(&mdev->req_lock);
1463 _tl_restart(mdev, what);
1464 nsm.i &= mdev->state.i;
1465 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1466 spin_unlock_irq(&mdev->req_lock);
1467 }
1468
1469 /* Became sync source. With protocol >= 96, we still need to send out
1470 * the sync uuid now. Need to do that before any drbd_send_state, or
1471 * the other side may go "paused sync" before receiving the sync uuids,
1472 * which is unexpected. */
1473 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1474 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1475 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1476 drbd_gen_and_send_sync_uuid(mdev);
1477 put_ldev(mdev);
1478 }
1479
1480 /* Do not change the order of the if above and the two below... */
1481 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1482 drbd_send_uuids(mdev);
1483 drbd_send_state(mdev, ns);
1484 }
1485 /* No point in queuing send_bitmap if we don't have a connection
1486 * anymore, so check also the _current_ state, not only the new state
1487 * at the time this work was queued. */
1488 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1489 mdev->state.conn == C_WF_BITMAP_S)
1490 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1491 "send_bitmap (WFBitMapS)",
1492 BM_LOCKED_TEST_ALLOWED);
1493
1494 /* Lost contact to peer's copy of the data */
1495 if ((os.pdsk >= D_INCONSISTENT &&
1496 os.pdsk != D_UNKNOWN &&
1497 os.pdsk != D_OUTDATED)
1498 && (ns.pdsk < D_INCONSISTENT ||
1499 ns.pdsk == D_UNKNOWN ||
1500 ns.pdsk == D_OUTDATED)) {
1501 if (get_ldev(mdev)) {
1502 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1503 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1504 if (is_susp(mdev->state)) {
1505 set_bit(NEW_CUR_UUID, &mdev->flags);
1506 } else {
1507 drbd_uuid_new_current(mdev);
1508 drbd_send_uuids(mdev);
1509 }
1510 }
1511 put_ldev(mdev);
1512 }
1513 }
1514
1515 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1516 if (os.peer == R_SECONDARY && ns.peer == R_PRIMARY &&
1517 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1518 drbd_uuid_new_current(mdev);
1519 drbd_send_uuids(mdev);
1520 }
1521 /* D_DISKLESS Peer becomes secondary */
1522 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1523 /* We may still be Primary ourselves.
1524 * No harm done if the bitmap still changes,
1525 * redirtied pages will follow later. */
1526 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1527 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
1528 put_ldev(mdev);
1529 }
1530
1531 /* Write out all changed bits on demote.
1532 * Though, no need to do that just yet
1533 * if there is a resync going on still */
1534 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1535 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1536 /* No changes to the bitmap expected this time, so assert that,
1537 * even though no harm was done if it did change. */
1538 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1539 "demote", BM_LOCKED_TEST_ALLOWED);
1540 put_ldev(mdev);
1541 }
1542
1543 /* Last part of the attaching process ... */
1544 if (ns.conn >= C_CONNECTED &&
1545 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1546 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
1547 drbd_send_uuids(mdev);
1548 drbd_send_state(mdev, ns);
1549 }
1550
1551 /* We want to pause/continue resync, tell peer. */
1552 if (ns.conn >= C_CONNECTED &&
1553 ((os.aftr_isp != ns.aftr_isp) ||
1554 (os.user_isp != ns.user_isp)))
1555 drbd_send_state(mdev, ns);
1556
1557 /* In case one of the isp bits got set, suspend other devices. */
1558 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1559 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1560 suspend_other_sg(mdev);
1561
1562 /* Make sure the peer gets informed about eventual state
1563 changes (ISP bits) while we were in WFReportParams. */
1564 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1565 drbd_send_state(mdev, ns);
1566
1567 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1568 drbd_send_state(mdev, ns);
1569
1570 /* We are in the progress to start a full sync... */
1571 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1572 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1573 /* no other bitmap changes expected during this phase */
1574 drbd_queue_bitmap_io(mdev,
1575 &drbd_bmio_set_n_write, &abw_start_sync,
1576 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
1577
1578 /* We are invalidating our self... */
1579 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1580 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1581 /* other bitmap operation expected during this phase */
1582 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1583 "set_n_write from invalidate", BM_LOCKED_MASK);
1584
1585 /* first half of local IO error, failure to attach,
1586 * or administrative detach */
1587 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1588 enum drbd_io_error_p eh = EP_PASS_ON;
1589 int was_io_error = 0;
1590 /* corresponding get_ldev was in __drbd_set_state, to serialize
1591 * our cleanup here with the transition to D_DISKLESS.
1592 * But it is still not safe to dereference ldev here, since
1593 * we might come from a failed Attach before ldev was set. */
1594 if (mdev->ldev) {
1595 eh = mdev->ldev->dc.on_io_error;
1596 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1597
1598 /* Immediately allow completion of all application IO, that waits
1599 for completion from the local disk. */
1600 tl_abort_disk_io(mdev);
1601
1602 /* current state still has to be D_FAILED,
1603 * there is only one way out: to D_DISKLESS,
1604 * and that may only happen after our put_ldev below. */
1605 if (mdev->state.disk != D_FAILED)
1606 dev_err(DEV,
1607 "ASSERT FAILED: disk is %s during detach\n",
1608 drbd_disk_str(mdev->state.disk));
1609
1610 if (ns.conn >= C_CONNECTED)
1611 drbd_send_state(mdev, ns);
1612
1613 drbd_rs_cancel_all(mdev);
1614
1615 /* In case we want to get something to stable storage still,
1616 * this may be the last chance.
1617 * Following put_ldev may transition to D_DISKLESS. */
1618 drbd_md_sync(mdev);
1619 }
1620 put_ldev(mdev);
1621
1622 if (was_io_error && eh == EP_CALL_HELPER)
1623 drbd_khelper(mdev, "local-io-error");
1624 }
1625
1626 /* second half of local IO error, failure to attach,
1627 * or administrative detach,
1628 * after local_cnt references have reached zero again */
1629 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1630 /* We must still be diskless,
1631 * re-attach has to be serialized with this! */
1632 if (mdev->state.disk != D_DISKLESS)
1633 dev_err(DEV,
1634 "ASSERT FAILED: disk is %s while going diskless\n",
1635 drbd_disk_str(mdev->state.disk));
1636
1637 mdev->rs_total = 0;
1638 mdev->rs_failed = 0;
1639 atomic_set(&mdev->rs_pending_cnt, 0);
1640
1641 if (ns.conn >= C_CONNECTED)
1642 drbd_send_state(mdev, ns);
1643
1644 /* corresponding get_ldev in __drbd_set_state
1645 * this may finally trigger drbd_ldev_destroy. */
1646 put_ldev(mdev);
1647 }
1648
1649 /* Notify peer that I had a local IO error, and did not detach. */
1650 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
1651 drbd_send_state(mdev, ns);
1652
1653 /* Disks got bigger while they were detached */
1654 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1655 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1656 if (ns.conn == C_CONNECTED)
1657 resync_after_online_grow(mdev);
1658 }
1659
1660 /* A resync finished or aborted, wake paused devices... */
1661 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1662 (os.peer_isp && !ns.peer_isp) ||
1663 (os.user_isp && !ns.user_isp))
1664 resume_next_sg(mdev);
1665
1666 /* sync target done with resync. Explicitly notify peer, even though
1667 * it should (at least for non-empty resyncs) already know itself. */
1668 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
f479ea06 1669 drbd_send_state(mdev, ns);
af85e8e8 1670
1671 /* This triggers bitmap writeout of potentially still unwritten pages
1672 * if the resync finished cleanly, or aborted because of peer disk
20ceb2b2 1673 * failure, or because of connection loss.
1674 * For resync aborted because of local disk failure, we cannot do
1675 * any bitmap writeout anymore.
20ceb2b2 1676 * No harm done if some bits change during this phase.
79a30d2d 1677 */
1678 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1679 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1680 "write from resync_finished", BM_LOCKED_SET_ALLOWED);
1681 put_ldev(mdev);
1682 }
02851e9f 1683
f70b3511 1684 /* free tl_hash if we got thawed and are C_STANDALONE */
fb22c402 1685 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1686 drbd_free_tl_hash(mdev);
1687
1688 /* Upon network connection, we need to start the receiver */
1689 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1690 drbd_thread_start(&mdev->receiver);
1691
1692 /* Terminate worker thread if we are unconfigured - it will be
1693 restarted as needed... */
1694 if (ns.disk == D_DISKLESS &&
1695 ns.conn == C_STANDALONE &&
1696 ns.role == R_SECONDARY) {
1697 if (os.aftr_isp != ns.aftr_isp)
1698 resume_next_sg(mdev);
1699 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1700 if (test_bit(DEVICE_DYING, &mdev->flags))
1701 drbd_thread_stop_nowait(&mdev->worker);
1702 }
1703
1704 drbd_md_sync(mdev);
1705}
1706
1707
1708static int drbd_thread_setup(void *arg)
1709{
1710 struct drbd_thread *thi = (struct drbd_thread *) arg;
1711 struct drbd_conf *mdev = thi->mdev;
1712 unsigned long flags;
1713 int retval;
1714
1715restart:
1716 retval = thi->function(thi);
1717
1718 spin_lock_irqsave(&thi->t_lock, flags);
1719
1720 /* if the receiver has been "Exiting", the last thing it did
1721 * was set the conn state to "StandAlone",
 1722 * if a re-connect request now comes in, the conn state goes to C_UNCONNECTED,
 1723 * and the receiver thread will be "started" again.
 1724 * drbd_thread_start needs to set "Restarting" in that case.
 1725 * t_state check and assignment need to be within the same spinlock,
 1726 * so either thread_start sees Exiting, and can remap to Restarting,
 1727 * or thread_start sees None, and can proceed as normal.
1728 */
1729
1730 if (thi->t_state == Restarting) {
1731 dev_info(DEV, "Restarting %s\n", current->comm);
1732 thi->t_state = Running;
1733 spin_unlock_irqrestore(&thi->t_lock, flags);
1734 goto restart;
1735 }
1736
1737 thi->task = NULL;
1738 thi->t_state = None;
1739 smp_mb();
1740 complete(&thi->stop);
1741 spin_unlock_irqrestore(&thi->t_lock, flags);
1742
1743 dev_info(DEV, "Terminating %s\n", current->comm);
1744
1745 /* Release mod reference taken when thread was started */
1746 module_put(THIS_MODULE);
1747 return retval;
1748}
1749
1750static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1751 int (*func) (struct drbd_thread *))
1752{
1753 spin_lock_init(&thi->t_lock);
1754 thi->task = NULL;
1755 thi->t_state = None;
1756 thi->function = func;
1757 thi->mdev = mdev;
1758}
1759
1760int drbd_thread_start(struct drbd_thread *thi)
1761{
1762 struct drbd_conf *mdev = thi->mdev;
1763 struct task_struct *nt;
1764 unsigned long flags;
1765
1766 const char *me =
1767 thi == &mdev->receiver ? "receiver" :
1768 thi == &mdev->asender ? "asender" :
1769 thi == &mdev->worker ? "worker" : "NONSENSE";
1770
1771 /* is used from state engine doing drbd_thread_stop_nowait,
1772 * while holding the req lock irqsave */
1773 spin_lock_irqsave(&thi->t_lock, flags);
1774
1775 switch (thi->t_state) {
1776 case None:
1777 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1778 me, current->comm, current->pid);
1779
1780 /* Get ref on module for thread - this is released when thread exits */
1781 if (!try_module_get(THIS_MODULE)) {
1782 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1783 spin_unlock_irqrestore(&thi->t_lock, flags);
81e84650 1784 return false;
b411b363
PR
1785 }
1786
1787 init_completion(&thi->stop);
1788 D_ASSERT(thi->task == NULL);
1789 thi->reset_cpu_mask = 1;
1790 thi->t_state = Running;
1791 spin_unlock_irqrestore(&thi->t_lock, flags);
1792 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1793
1794 nt = kthread_create(drbd_thread_setup, (void *) thi,
1795 "drbd%d_%s", mdev_to_minor(mdev), me);
1796
1797 if (IS_ERR(nt)) {
1798 dev_err(DEV, "Couldn't start thread\n");
1799
1800 module_put(THIS_MODULE);
81e84650 1801 return false;
b411b363
PR
1802 }
1803 spin_lock_irqsave(&thi->t_lock, flags);
1804 thi->task = nt;
1805 thi->t_state = Running;
1806 spin_unlock_irqrestore(&thi->t_lock, flags);
1807 wake_up_process(nt);
1808 break;
1809 case Exiting:
1810 thi->t_state = Restarting;
1811 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1812 me, current->comm, current->pid);
1813 /* fall through */
1814 case Running:
1815 case Restarting:
1816 default:
1817 spin_unlock_irqrestore(&thi->t_lock, flags);
1818 break;
1819 }
1820
81e84650 1821 return true;
b411b363
PR
1822}
1823
1824
1825void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1826{
1827 unsigned long flags;
1828
1829 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1830
1831 /* may be called from state engine, holding the req lock irqsave */
1832 spin_lock_irqsave(&thi->t_lock, flags);
1833
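 /* not running at all: nothing to stop, optionally start it fresh */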
1834 if (thi->t_state == None) {
1835 spin_unlock_irqrestore(&thi->t_lock, flags);
1836 if (restart)
1837 drbd_thread_start(thi);
1838 return;
1839 }
1840
1841 if (thi->t_state != ns) {
1842 if (thi->task == NULL) {
1843 spin_unlock_irqrestore(&thi->t_lock, flags);
1844 return;
1845 }
1846
1847 thi->t_state = ns;
1848 smp_mb();
1849 init_completion(&thi->stop);
1850 if (thi->task != current)
1851 force_sig(DRBD_SIGKILL, thi->task);
1852
1853 }
1854
1855 spin_unlock_irqrestore(&thi->t_lock, flags);
1856
1857 if (wait)
1858 wait_for_completion(&thi->stop);
1859}
1860
1861#ifdef CONFIG_SMP
1862/**
1863 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1864 * @mdev: DRBD device.
1865 *
1866 * Forces all threads of a device onto the same CPU. This is beneficial for
 1867 * DRBD's performance. May be overridden by the user's configuration.
1868 */
1869void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1870{
1871 int ord, cpu;
1872
1873 /* user override. */
1874 if (cpumask_weight(mdev->cpu_mask))
1875 return;
1876
1877 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1878 for_each_online_cpu(cpu) {
1879 if (ord-- == 0) {
1880 cpumask_set_cpu(cpu, mdev->cpu_mask);
1881 return;
1882 }
1883 }
1884 /* should not be reached */
1885 cpumask_setall(mdev->cpu_mask);
1886}
1887
1888/**
1889 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1890 * @mdev: DRBD device.
1891 *
1892 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1893 * prematurely.
1894 */
1895void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1896{
1897 struct task_struct *p = current;
1898 struct drbd_thread *thi =
1899 p == mdev->asender.task ? &mdev->asender :
1900 p == mdev->receiver.task ? &mdev->receiver :
1901 p == mdev->worker.task ? &mdev->worker :
1902 NULL;
1903 ERR_IF(thi == NULL)
1904 return;
1905 if (!thi->reset_cpu_mask)
1906 return;
1907 thi->reset_cpu_mask = 0;
1908 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1909}
1910#endif
1911
1912/* the appropriate socket mutex must be held already */
1913int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
0b70a13d 1914 enum drbd_packets cmd, struct p_header80 *h,
b411b363
PR
1915 size_t size, unsigned msg_flags)
1916{
1917 int sent, ok;
1918
81e84650
AG
1919 ERR_IF(!h) return false;
1920 ERR_IF(!size) return false;
b411b363
PR
1921
1922 h->magic = BE_DRBD_MAGIC;
1923 h->command = cpu_to_be16(cmd);
0b70a13d 1924 h->length = cpu_to_be16(size-sizeof(struct p_header80));
b411b363 1925
b411b363
PR
1926 sent = drbd_send(mdev, sock, h, size, msg_flags);
1927
1928 ok = (sent == size);
0ddc5549
LE
1929 if (!ok && !signal_pending(current))
1930 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
b411b363
PR
1931 cmdname(cmd), (int)size, sent);
1932 return ok;
1933}
1934
1935/* don't pass the socket. we may only look at it
1936 * when we hold the appropriate socket mutex.
1937 */
1938int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
0b70a13d 1939 enum drbd_packets cmd, struct p_header80 *h, size_t size)
b411b363
PR
1940{
1941 int ok = 0;
1942 struct socket *sock;
1943
1944 if (use_data_socket) {
1945 mutex_lock(&mdev->data.mutex);
1946 sock = mdev->data.socket;
1947 } else {
1948 mutex_lock(&mdev->meta.mutex);
1949 sock = mdev->meta.socket;
1950 }
1951
1952 /* drbd_disconnect() could have called drbd_free_sock()
1953 * while we were waiting in down()... */
1954 if (likely(sock != NULL))
1955 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1956
1957 if (use_data_socket)
1958 mutex_unlock(&mdev->data.mutex);
1959 else
1960 mutex_unlock(&mdev->meta.mutex);
1961 return ok;
1962}
1963
1964int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1965 size_t size)
1966{
0b70a13d 1967 struct p_header80 h;
b411b363
PR
1968 int ok;
1969
1970 h.magic = BE_DRBD_MAGIC;
1971 h.command = cpu_to_be16(cmd);
1972 h.length = cpu_to_be16(size);
1973
1974 if (!drbd_get_data_sock(mdev))
1975 return 0;
1976
b411b363
PR
1977 ok = (sizeof(h) ==
1978 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1979 ok = ok && (size ==
1980 drbd_send(mdev, mdev->data.socket, data, size, 0));
1981
1982 drbd_put_data_sock(mdev);
1983
1984 return ok;
1985}
1986
1987int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1988{
8e26f9cc 1989 struct p_rs_param_95 *p;
b411b363
PR
1990 struct socket *sock;
1991 int size, rv;
1992 const int apv = mdev->agreed_pro_version;
1993
1994 size = apv <= 87 ? sizeof(struct p_rs_param)
1995 : apv == 88 ? sizeof(struct p_rs_param)
1996 + strlen(mdev->sync_conf.verify_alg) + 1
8e26f9cc
PR
1997 : apv <= 94 ? sizeof(struct p_rs_param_89)
1998 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363
PR
1999
2000 /* used from admin command context and receiver/worker context.
2001 * to avoid kmalloc, grab the socket right here,
2002 * then use the pre-allocated sbuf there */
2003 mutex_lock(&mdev->data.mutex);
2004 sock = mdev->data.socket;
2005
2006 if (likely(sock != NULL)) {
2007 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
2008
8e26f9cc 2009 p = &mdev->data.sbuf.rs_param_95;
b411b363
PR
2010
2011 /* initialize verify_alg and csums_alg */
2012 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2013
2014 p->rate = cpu_to_be32(sc->rate);
8e26f9cc
PR
2015 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
2016 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
2017 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
2018 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
b411b363
PR
2019
2020 if (apv >= 88)
2021 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
2022 if (apv >= 89)
2023 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
2024
2025 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
2026 } else
2027 rv = 0; /* not ok */
2028
2029 mutex_unlock(&mdev->data.mutex);
2030
2031 return rv;
2032}
2033
2034int drbd_send_protocol(struct drbd_conf *mdev)
2035{
2036 struct p_protocol *p;
cf14c2e9 2037 int size, cf, rv;
b411b363
PR
2038
2039 size = sizeof(struct p_protocol);
2040
2041 if (mdev->agreed_pro_version >= 87)
2042 size += strlen(mdev->net_conf->integrity_alg) + 1;
2043
2044 /* we must not recurse into our own queue,
2045 * as that is blocked during handshake */
2046 p = kmalloc(size, GFP_NOIO);
2047 if (p == NULL)
2048 return 0;
2049
2050 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
2051 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
2052 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
2053 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
b411b363
PR
2054 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
2055
cf14c2e9
PR
2056 cf = 0;
2057 if (mdev->net_conf->want_lose)
2058 cf |= CF_WANT_LOSE;
2059 if (mdev->net_conf->dry_run) {
2060 if (mdev->agreed_pro_version >= 92)
2061 cf |= CF_DRY_RUN;
2062 else {
2063 dev_err(DEV, "--dry-run is not supported by peer");
7ac314c8 2064 kfree(p);
148efa16 2065 return -1;
cf14c2e9
PR
2066 }
2067 }
2068 p->conn_flags = cpu_to_be32(cf);
2069
b411b363
PR
2070 if (mdev->agreed_pro_version >= 87)
2071 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
2072
2073 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
0b70a13d 2074 (struct p_header80 *)p, size);
b411b363
PR
2075 kfree(p);
2076 return rv;
2077}
2078
2079int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
2080{
2081 struct p_uuids p;
2082 int i;
2083
2084 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2085 return 1;
2086
2087 for (i = UI_CURRENT; i < UI_SIZE; i++)
2088 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2089
2090 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2091 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
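 /* uuid_flags: 1 = want_lose, 2 = crashed primary,
 * 4 = disk was D_INCONSISTENT while negotiating */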
2092 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2093 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2094 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2095 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2096
2097 put_ldev(mdev);
2098
2099 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
0b70a13d 2100 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2101}
2102
2103int drbd_send_uuids(struct drbd_conf *mdev)
2104{
2105 return _drbd_send_uuids(mdev, 0);
2106}
2107
2108int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2109{
2110 return _drbd_send_uuids(mdev, 8);
2111}
2112
62b0da3a
LE
2113void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2114{
2115 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2116 u64 *uuid = mdev->ldev->md.uuid;
2117 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2118 text,
2119 (unsigned long long)uuid[UI_CURRENT],
2120 (unsigned long long)uuid[UI_BITMAP],
2121 (unsigned long long)uuid[UI_HISTORY_START],
2122 (unsigned long long)uuid[UI_HISTORY_END]);
2123 put_ldev(mdev);
2124 } else {
2125 dev_info(DEV, "%s effective data uuid: %016llX\n",
2126 text,
2127 (unsigned long long)mdev->ed_uuid);
2128 }
2129}
2130
5a22db89 2131int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
b411b363
PR
2132{
2133 struct p_rs_uuid p;
5a22db89
LE
2134 u64 uuid;
2135
2136 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
b411b363 2137
5ba3dac5
PR
2138 uuid = mdev->ldev->md.uuid[UI_BITMAP];
2139 if (uuid && uuid != UUID_JUST_CREATED)
2140 uuid = uuid + UUID_NEW_BM_OFFSET;
2141 else
2142 get_random_bytes(&uuid, sizeof(u64));
5a22db89 2143 drbd_uuid_set(mdev, UI_BITMAP, uuid);
62b0da3a 2144 drbd_print_uuids(mdev, "updated sync UUID");
5a22db89
LE
2145 drbd_md_sync(mdev);
2146 p.uuid = cpu_to_be64(uuid);
b411b363
PR
2147
2148 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
0b70a13d 2149 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2150}
2151
e89b591c 2152int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
b411b363
PR
2153{
2154 struct p_sizes p;
2155 sector_t d_size, u_size;
99432fcc 2156 int q_order_type, max_bio_size;
b411b363
PR
2157 int ok;
2158
2159 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2160 D_ASSERT(mdev->ldev->backing_bdev);
2161 d_size = drbd_get_max_capacity(mdev->ldev);
2162 u_size = mdev->ldev->dc.disk_size;
2163 q_order_type = drbd_queue_order_type(mdev);
99432fcc
PR
2164 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2165 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
b411b363
PR
2166 put_ldev(mdev);
2167 } else {
2168 d_size = 0;
2169 u_size = 0;
2170 q_order_type = QUEUE_ORDERED_NONE;
99432fcc 2171 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
b411b363
PR
2172 }
2173
6809384c
PR
2174 /* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
2175 if (mdev->agreed_pro_version <= 94)
2176 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
2177
b411b363
PR
2178 p.d_size = cpu_to_be64(d_size);
2179 p.u_size = cpu_to_be64(u_size);
2180 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
99432fcc 2181 p.max_bio_size = cpu_to_be32(max_bio_size);
e89b591c
PR
2182 p.queue_order_type = cpu_to_be16(q_order_type);
2183 p.dds_flags = cpu_to_be16(flags);
b411b363
PR
2184
2185 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
0b70a13d 2186 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2187 return ok;
2188}
2189
2190/**
f479ea06 2191 * drbd_send_current_state() - Sends the drbd state to the peer
b411b363
PR
2192 * @mdev: DRBD device.
2193 */
f479ea06 2194int drbd_send_current_state(struct drbd_conf *mdev)
b411b363
PR
2195{
2196 struct socket *sock;
2197 struct p_state p;
2198 int ok = 0;
2199
 2200 /* Grab state lock so we won't send state if we're in the middle
 2201 * of a cluster-wide state change on another thread */
2202 drbd_state_lock(mdev);
2203
2204 mutex_lock(&mdev->data.mutex);
2205
2206 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2207 sock = mdev->data.socket;
2208
2209 if (likely(sock != NULL)) {
2210 ok = _drbd_send_cmd(mdev, sock, P_STATE,
0b70a13d 2211 (struct p_header80 *)&p, sizeof(p), 0);
b411b363
PR
2212 }
2213
2214 mutex_unlock(&mdev->data.mutex);
2215
2216 drbd_state_unlock(mdev);
2217 return ok;
2218}
2219
f479ea06
LE
2220/**
2221 * drbd_send_state() - After a state change, sends the new state to the peer
2222 * @mdev: DRBD device.
2223 * @state: the state to send, not necessarily the current state.
2224 *
2225 * Each state change queues an "after_state_ch" work, which will eventually
2226 * send the resulting new state to the peer. If more state changes happen
2227 * between queuing and processing of the after_state_ch work, we still
2228 * want to send each intermediary state in the order it occurred.
2229 */
2230int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
2231{
2232 struct socket *sock;
2233 struct p_state p;
2234 int ok = 0;
2235
2236 mutex_lock(&mdev->data.mutex);
2237
2238 p.state = cpu_to_be32(state.i);
2239 sock = mdev->data.socket;
2240
2241 if (likely(sock != NULL)) {
2242 ok = _drbd_send_cmd(mdev, sock, P_STATE,
2243 (struct p_header80 *)&p, sizeof(p), 0);
2244 }
2245
2246 mutex_unlock(&mdev->data.mutex);
2247
2248 return ok;
2249}
2250
b411b363
PR
2251int drbd_send_state_req(struct drbd_conf *mdev,
2252 union drbd_state mask, union drbd_state val)
2253{
2254 struct p_req_state p;
2255
2256 p.mask = cpu_to_be32(mask.i);
2257 p.val = cpu_to_be32(val.i);
2258
2259 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
0b70a13d 2260 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2261}
2262
bf885f8a 2263int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
b411b363
PR
2264{
2265 struct p_req_state_reply p;
2266
2267 p.retcode = cpu_to_be32(retcode);
2268
2269 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
0b70a13d 2270 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2271}
2272
2273int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2274 struct p_compressed_bm *p,
2275 struct bm_xfer_ctx *c)
2276{
2277 struct bitstream bs;
2278 unsigned long plain_bits;
2279 unsigned long tmp;
2280 unsigned long rl;
2281 unsigned len;
2282 unsigned toggle;
2283 int bits;
2284
2285 /* may we use this feature? */
2286 if ((mdev->sync_conf.use_rle == 0) ||
2287 (mdev->agreed_pro_version < 90))
2288 return 0;
2289
2290 if (c->bit_offset >= c->bm_bits)
2291 return 0; /* nothing to do. */
2292
 2293 /* use at most this many bytes */
2294 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2295 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2296 /* plain bits covered in this code string */
2297 plain_bits = 0;
2298
2299 /* p->encoding & 0x80 stores whether the first run length is set.
2300 * bit offset is implicit.
2301 * start with toggle == 2 to be able to tell the first iteration */
2302 toggle = 2;
2303
 2304 /* see how many plain bits we can stuff into one packet
2305 * using RLE and VLI. */
2306 do {
2307 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2308 : _drbd_bm_find_next(mdev, c->bit_offset);
2309 if (tmp == -1UL)
2310 tmp = c->bm_bits;
2311 rl = tmp - c->bit_offset;
2312
2313 if (toggle == 2) { /* first iteration */
2314 if (rl == 0) {
2315 /* the first checked bit was set,
2316 * store start value, */
2317 DCBP_set_start(p, 1);
2318 /* but skip encoding of zero run length */
2319 toggle = !toggle;
2320 continue;
2321 }
2322 DCBP_set_start(p, 0);
2323 }
2324
2325 /* paranoia: catch zero runlength.
2326 * can only happen if bitmap is modified while we scan it. */
2327 if (rl == 0) {
2328 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2329 "t:%u bo:%lu\n", toggle, c->bit_offset);
2330 return -1;
2331 }
2332
2333 bits = vli_encode_bits(&bs, rl);
2334 if (bits == -ENOBUFS) /* buffer full */
2335 break;
2336 if (bits <= 0) {
2337 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2338 return 0;
2339 }
2340
2341 toggle = !toggle;
2342 plain_bits += rl;
2343 c->bit_offset = tmp;
2344 } while (c->bit_offset < c->bm_bits);
2345
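 /* number of code bytes actually used: whole bytes plus one more
 * for a partially filled trailing byte */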
2346 len = bs.cur.b - p->code + !!bs.cur.bit;
2347
2348 if (plain_bits < (len << 3)) {
2349 /* incompressible with this method.
2350 * we need to rewind both word and bit position. */
2351 c->bit_offset -= plain_bits;
2352 bm_xfer_ctx_bit_to_word_offset(c);
2353 c->bit_offset = c->word_offset * BITS_PER_LONG;
2354 return 0;
2355 }
2356
2357 /* RLE + VLI was able to compress it just fine.
2358 * update c->word_offset. */
2359 bm_xfer_ctx_bit_to_word_offset(c);
2360
2361 /* store pad_bits */
2362 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2363
2364 return len;
2365}
2366
f70af118
AG
2367/**
2368 * send_bitmap_rle_or_plain
2369 *
2370 * Return 0 when done, 1 when another iteration is needed, and a negative error
2371 * code upon failure.
2372 */
2373static int
b411b363 2374send_bitmap_rle_or_plain(struct drbd_conf *mdev,
f70af118 2375 struct p_header80 *h, struct bm_xfer_ctx *c)
b411b363
PR
2376{
2377 struct p_compressed_bm *p = (void*)h;
2378 unsigned long num_words;
2379 int len;
2380 int ok;
2381
2382 len = fill_bitmap_rle_bits(mdev, p, c);
2383
2384 if (len < 0)
f70af118 2385 return -EIO;
b411b363
PR
2386
2387 if (len) {
2388 DCBP_set_code(p, RLE_VLI_Bits);
2389 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2390 sizeof(*p) + len, 0);
2391
2392 c->packets[0]++;
2393 c->bytes[0] += sizeof(*p) + len;
2394
2395 if (c->bit_offset >= c->bm_bits)
2396 len = 0; /* DONE */
2397 } else {
2398 /* was not compressible.
2399 * send a buffer full of plain text bits instead. */
2400 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2401 len = num_words * sizeof(long);
2402 if (len)
2403 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2404 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
0b70a13d 2405 h, sizeof(struct p_header80) + len, 0);
b411b363
PR
2406 c->word_offset += num_words;
2407 c->bit_offset = c->word_offset * BITS_PER_LONG;
2408
2409 c->packets[1]++;
0b70a13d 2410 c->bytes[1] += sizeof(struct p_header80) + len;
b411b363
PR
2411
2412 if (c->bit_offset > c->bm_bits)
2413 c->bit_offset = c->bm_bits;
2414 }
f70af118
AG
2415 if (ok) {
2416 if (len == 0) {
2417 INFO_bm_xfer_stats(mdev, "send", c);
2418 return 0;
2419 } else
2420 return 1;
2421 }
2422 return -EIO;
b411b363
PR
2423}
2424
2425/* See the comment at receive_bitmap() */
2426int _drbd_send_bitmap(struct drbd_conf *mdev)
2427{
2428 struct bm_xfer_ctx c;
0b70a13d 2429 struct p_header80 *p;
f70af118 2430 int err;
b411b363 2431
81e84650 2432 ERR_IF(!mdev->bitmap) return false;
b411b363
PR
2433
2434 /* maybe we should use some per thread scratch page,
2435 * and allocate that during initial device creation? */
0b70a13d 2436 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
b411b363
PR
2437 if (!p) {
2438 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
81e84650 2439 return false;
b411b363
PR
2440 }
2441
2442 if (get_ldev(mdev)) {
2443 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2444 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2445 drbd_bm_set_all(mdev);
2446 if (drbd_bm_write(mdev)) {
2447 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2448 * but otherwise process as per normal - need to tell other
2449 * side that a full resync is required! */
2450 dev_err(DEV, "Failed to write bitmap to disk!\n");
2451 } else {
2452 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2453 drbd_md_sync(mdev);
2454 }
2455 }
2456 put_ldev(mdev);
2457 }
2458
2459 c = (struct bm_xfer_ctx) {
2460 .bm_bits = drbd_bm_bits(mdev),
2461 .bm_words = drbd_bm_words(mdev),
2462 };
2463
2464 do {
f70af118
AG
2465 err = send_bitmap_rle_or_plain(mdev, p, &c);
2466 } while (err > 0);
b411b363
PR
2467
2468 free_page((unsigned long) p);
f70af118 2469 return err == 0;
b411b363
PR
2470}
2471
2472int drbd_send_bitmap(struct drbd_conf *mdev)
2473{
2474 int err;
2475
2476 if (!drbd_get_data_sock(mdev))
2477 return -1;
2478 err = !_drbd_send_bitmap(mdev);
2479 drbd_put_data_sock(mdev);
2480 return err;
2481}
2482
2483int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2484{
2485 int ok;
2486 struct p_barrier_ack p;
2487
2488 p.barrier = barrier_nr;
2489 p.set_size = cpu_to_be32(set_size);
2490
2491 if (mdev->state.conn < C_CONNECTED)
81e84650 2492 return false;
b411b363 2493 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
0b70a13d 2494 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2495 return ok;
2496}
2497
2498/**
2499 * _drbd_send_ack() - Sends an ack packet
2500 * @mdev: DRBD device.
2501 * @cmd: Packet command code.
2502 * @sector: sector, needs to be in big endian byte order
2503 * @blksize: size in byte, needs to be in big endian byte order
2504 * @block_id: Id, big endian byte order
2505 */
2506static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2507 u64 sector,
2508 u32 blksize,
2509 u64 block_id)
2510{
2511 int ok;
2512 struct p_block_ack p;
2513
2514 p.sector = sector;
2515 p.block_id = block_id;
2516 p.blksize = blksize;
2517 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2518
2519 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
81e84650 2520 return false;
b411b363 2521 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
0b70a13d 2522 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2523 return ok;
2524}
2525
2b2bf214
LE
2526/* dp->sector and dp->block_id already/still in network byte order,
2527 * data_size is payload size according to dp->head,
2528 * and may need to be corrected for digest size. */
b411b363 2529int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2b2bf214 2530 struct p_data *dp, int data_size)
b411b363 2531{
2b2bf214
LE
2532 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2533 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
b411b363
PR
2534 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2535 dp->block_id);
2536}
2537
2538int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2539 struct p_block_req *rp)
2540{
2541 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2542}
2543
2544/**
2545 * drbd_send_ack() - Sends an ack packet
2546 * @mdev: DRBD device.
2547 * @cmd: Packet command code.
2548 * @e: Epoch entry.
2549 */
2550int drbd_send_ack(struct drbd_conf *mdev,
2551 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2552{
2553 return _drbd_send_ack(mdev, cmd,
2554 cpu_to_be64(e->sector),
2555 cpu_to_be32(e->size),
2556 e->block_id);
2557}
2558
2559/* This function misuses the block_id field to signal if the blocks
 2560 * are in sync or not. */
2561int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2562 sector_t sector, int blksize, u64 block_id)
2563{
2564 return _drbd_send_ack(mdev, cmd,
2565 cpu_to_be64(sector),
2566 cpu_to_be32(blksize),
2567 cpu_to_be64(block_id));
2568}
2569
2570int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2571 sector_t sector, int size, u64 block_id)
2572{
2573 int ok;
2574 struct p_block_req p;
2575
2576 p.sector = cpu_to_be64(sector);
2577 p.block_id = block_id;
2578 p.blksize = cpu_to_be32(size);
2579
2580 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
0b70a13d 2581 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2582 return ok;
2583}
2584
2585int drbd_send_drequest_csum(struct drbd_conf *mdev,
2586 sector_t sector, int size,
2587 void *digest, int digest_size,
2588 enum drbd_packets cmd)
2589{
2590 int ok;
2591 struct p_block_req p;
2592
2593 p.sector = cpu_to_be64(sector);
2594 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2595 p.blksize = cpu_to_be32(size);
2596
2597 p.head.magic = BE_DRBD_MAGIC;
2598 p.head.command = cpu_to_be16(cmd);
0b70a13d 2599 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
b411b363
PR
2600
2601 mutex_lock(&mdev->data.mutex);
2602
2603 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2604 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2605
2606 mutex_unlock(&mdev->data.mutex);
2607
2608 return ok;
2609}
2610
2611int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2612{
2613 int ok;
2614 struct p_block_req p;
2615
2616 p.sector = cpu_to_be64(sector);
2617 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2618 p.blksize = cpu_to_be32(size);
2619
2620 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
0b70a13d 2621 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2622 return ok;
2623}
2624
2625/* called on sndtimeo
81e84650
AG
2626 * returns false if we should retry,
2627 * true if we think connection is dead
b411b363
PR
2628 */
2629static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2630{
2631 int drop_it;
2632 /* long elapsed = (long)(jiffies - mdev->last_received); */
2633
2634 drop_it = mdev->meta.socket == sock
2635 || !mdev->asender.task
2636 || get_t_state(&mdev->asender) != Running
2637 || mdev->state.conn < C_CONNECTED;
2638
2639 if (drop_it)
81e84650 2640 return true;
b411b363
PR
2641
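 /* no obvious reason to drop: count down ko_count and give up once it
 * hits zero; until then just ask the peer for a ping */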
2642 drop_it = !--mdev->ko_count;
2643 if (!drop_it) {
2644 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2645 current->comm, current->pid, mdev->ko_count);
2646 request_ping(mdev);
2647 }
2648
2649 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2650}
2651
2652/* The idea of sendpage seems to be to put some kind of reference
2653 * to the page into the skb, and to hand it over to the NIC. In
2654 * this process get_page() gets called.
2655 *
2656 * As soon as the page was really sent over the network put_page()
2657 * gets called by some part of the network layer. [ NIC driver? ]
2658 *
2659 * [ get_page() / put_page() increment/decrement the count. If count
2660 * reaches 0 the page will be freed. ]
2661 *
2662 * This works nicely with pages from FSs.
2663 * But this means that in protocol A we might signal IO completion too early!
2664 *
2665 * In order not to corrupt data during a resync we must make sure
 2666 * that we do not reuse our own buffer pages (EEs) too early, therefore
2667 * we have the net_ee list.
2668 *
2669 * XFS seems to have problems, still, it submits pages with page_count == 0!
2670 * As a workaround, we disable sendpage on pages
2671 * with page_count == 0 or PageSlab.
2672 */
2673static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2674 int offset, size_t size, unsigned msg_flags)
b411b363 2675{
ba11ad9a 2676 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
b411b363
PR
2677 kunmap(page);
2678 if (sent == size)
2679 mdev->send_cnt += size>>9;
2680 return sent == size;
2681}
2682
2683static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2684 int offset, size_t size, unsigned msg_flags)
b411b363
PR
2685{
2686 mm_segment_t oldfs = get_fs();
2687 int sent, ok;
2688 int len = size;
2689
2690 /* e.g. XFS meta- & log-data is in slab pages, which have a
2691 * page_count of 0 and/or have PageSlab() set.
2692 * we cannot use send_page for those, as that does get_page();
2693 * put_page(); and would cause either a VM_BUG directly, or
2694 * __page_cache_release a page that would actually still be referenced
2695 * by someone, leading to some obscure delayed Oops somewhere else. */
2696 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
ba11ad9a 2697 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
b411b363 2698
ba11ad9a 2699 msg_flags |= MSG_NOSIGNAL;
b411b363
PR
2700 drbd_update_congested(mdev);
2701 set_fs(KERNEL_DS);
2702 do {
2703 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2704 offset, len,
ba11ad9a 2705 msg_flags);
b411b363
PR
2706 if (sent == -EAGAIN) {
2707 if (we_should_drop_the_connection(mdev,
2708 mdev->data.socket))
2709 break;
2710 else
2711 continue;
2712 }
2713 if (sent <= 0) {
2714 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2715 __func__, (int)size, len, sent);
2716 break;
2717 }
2718 len -= sent;
2719 offset += sent;
2720 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2721 set_fs(oldfs);
2722 clear_bit(NET_CONGESTED, &mdev->flags);
2723
2724 ok = (len == 0);
2725 if (likely(ok))
2726 mdev->send_cnt += size>>9;
2727 return ok;
2728}
2729
2730static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2731{
2732 struct bio_vec *bvec;
2733 int i;
ba11ad9a 2734 /* hint all but last page with MSG_MORE */
001a8868 2735 bio_for_each_segment(bvec, bio, i) {
b411b363 2736 if (!_drbd_no_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2737 bvec->bv_offset, bvec->bv_len,
2738 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2739 return 0;
2740 }
2741 return 1;
2742}
2743
2744static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2745{
2746 struct bio_vec *bvec;
2747 int i;
ba11ad9a 2748 /* hint all but last page with MSG_MORE */
001a8868 2749 bio_for_each_segment(bvec, bio, i) {
b411b363 2750 if (!_drbd_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2751 bvec->bv_offset, bvec->bv_len,
2752 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2753 return 0;
2754 }
b411b363
PR
2755 return 1;
2756}
2757
45bb912b
LE
2758static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2759{
2760 struct page *page = e->pages;
2761 unsigned len = e->size;
ba11ad9a 2762 /* hint all but last page with MSG_MORE */
45bb912b
LE
2763 page_chain_for_each(page) {
2764 unsigned l = min_t(unsigned, len, PAGE_SIZE);
ba11ad9a
LE
2765 if (!_drbd_send_page(mdev, page, 0, l,
2766 page_chain_next(page) ? MSG_MORE : 0))
45bb912b
LE
2767 return 0;
2768 len -= l;
2769 }
2770 return 1;
2771}
2772
76d2e7ec
PR
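/* translate the bio's REQ_* flags into the DP_* flags we put on the wire;
 * peers before protocol 95 only understand DP_RW_SYNC */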
2773static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2774{
2775 if (mdev->agreed_pro_version >= 95)
2776 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
76d2e7ec
PR
2777 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2778 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2779 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2780 else
721a9602 2781 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
76d2e7ec
PR
2782}
2783
b411b363
PR
2784/* Used to send write requests
2785 * R_PRIMARY -> Peer (P_DATA)
2786 */
2787int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2788{
2789 int ok = 1;
2790 struct p_data p;
2791 unsigned int dp_flags = 0;
2792 void *dgb;
2793 int dgs;
2794
2795 if (!drbd_get_data_sock(mdev))
2796 return 0;
2797
2798 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2799 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2800
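 /* payloads that fit the 16 bit length field use the old h80 header,
 * anything larger needs the h95 header with its 32 bit length */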
d5373389 2801 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
0b70a13d
PR
2802 p.head.h80.magic = BE_DRBD_MAGIC;
2803 p.head.h80.command = cpu_to_be16(P_DATA);
2804 p.head.h80.length =
2805 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2806 } else {
2807 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2808 p.head.h95.command = cpu_to_be16(P_DATA);
2809 p.head.h95.length =
2810 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2811 }
b411b363
PR
2812
2813 p.sector = cpu_to_be64(req->sector);
2814 p.block_id = (unsigned long)req;
671a74e7 2815 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
b411b363 2816
76d2e7ec
PR
2817 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2818
b411b363
PR
2819 if (mdev->state.conn >= C_SYNC_SOURCE &&
2820 mdev->state.conn <= C_PAUSED_SYNC_T)
2821 dp_flags |= DP_MAY_SET_IN_SYNC;
2822
2823 p.dp_flags = cpu_to_be32(dp_flags);
b411b363
PR
2824 set_bit(UNPLUG_REMOTE, &mdev->flags);
2825 ok = (sizeof(p) ==
ba11ad9a 2826 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
b411b363
PR
2827 if (ok && dgs) {
2828 dgb = mdev->int_dig_out;
45bb912b 2829 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
cab2f74b 2830 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2831 }
2832 if (ok) {
470be44a
LE
2833 /* For protocol A, we have to memcpy the payload into
2834 * socket buffers, as we may complete right away
2835 * as soon as we handed it over to tcp, at which point the data
2836 * pages may become invalid.
2837 *
2838 * For data-integrity enabled, we copy it as well, so we can be
2839 * sure that even if the bio pages may still be modified, it
2840 * won't change the data on the wire, thus if the digest checks
2841 * out ok after sending on this side, but does not fit on the
2842 * receiving side, we sure have detected corruption elsewhere.
2843 */
2844 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
b411b363
PR
2845 ok = _drbd_send_bio(mdev, req->master_bio);
2846 else
2847 ok = _drbd_send_zc_bio(mdev, req->master_bio);
470be44a
LE
2848
2849 /* double check digest, sometimes buffers have been modified in flight. */
2850 if (dgs > 0 && dgs <= 64) {
24c4830c 2851 /* 64 byte, 512 bit, is the largest digest size
470be44a
LE
2852 * currently supported in kernel crypto. */
2853 unsigned char digest[64];
2854 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2855 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2856 dev_warn(DEV,
2857 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2858 (unsigned long long)req->sector, req->size);
2859 }
2860 } /* else if (dgs > 64) {
2861 ... Be noisy about digest too large ...
2862 } */
b411b363
PR
2863 }
2864
2865 drbd_put_data_sock(mdev);
bd26bfc5 2866
b411b363
PR
2867 return ok;
2868}
2869
2870/* answer packet, used to send data back for read requests:
2871 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2872 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2873 */
2874int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2875 struct drbd_epoch_entry *e)
2876{
2877 int ok;
2878 struct p_data p;
2879 void *dgb;
2880 int dgs;
2881
2882 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2883 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2884
d5373389 2885 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
0b70a13d
PR
2886 p.head.h80.magic = BE_DRBD_MAGIC;
2887 p.head.h80.command = cpu_to_be16(cmd);
2888 p.head.h80.length =
2889 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2890 } else {
2891 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2892 p.head.h95.command = cpu_to_be16(cmd);
2893 p.head.h95.length =
2894 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2895 }
b411b363
PR
2896
2897 p.sector = cpu_to_be64(e->sector);
2898 p.block_id = e->block_id;
2899 /* p.seq_num = 0; No sequence numbers here.. */
2900
2901 /* Only called by our kernel thread.
2902 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2903 * in response to admin command or module unload.
2904 */
2905 if (!drbd_get_data_sock(mdev))
2906 return 0;
2907
0b70a13d 2908 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
b411b363
PR
2909 if (ok && dgs) {
2910 dgb = mdev->int_dig_out;
45bb912b 2911 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
cab2f74b 2912 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2913 }
2914 if (ok)
45bb912b 2915 ok = _drbd_send_zc_ee(mdev, e);
b411b363
PR
2916
2917 drbd_put_data_sock(mdev);
bd26bfc5 2918
b411b363
PR
2919 return ok;
2920}
2921
73a01a18
PR
2922int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2923{
2924 struct p_block_desc p;
2925
2926 p.sector = cpu_to_be64(req->sector);
2927 p.blksize = cpu_to_be32(req->size);
2928
2929 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2930}
2931
b411b363
PR
2932/*
2933 drbd_send distinguishes two cases:
2934
2935 Packets sent via the data socket "sock"
2936 and packets sent via the meta data socket "msock"
2937
 2938                    sock                      msock
 2939 -----------------+-------------------------+------------------------------
 2940 timeout           conf.timeout / 2          conf.timeout / 2
 2941 timeout action    send a ping via msock     Abort communication
 2942                                             and close all sockets
2943*/
2944
2945/*
2946 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2947 */
2948int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2949 void *buf, size_t size, unsigned msg_flags)
2950{
2951 struct kvec iov;
2952 struct msghdr msg;
2953 int rv, sent = 0;
2954
2955 if (!sock)
2956 return -1000;
2957
2958 /* THINK if (signal_pending) return ... ? */
2959
2960 iov.iov_base = buf;
2961 iov.iov_len = size;
2962
2963 msg.msg_name = NULL;
2964 msg.msg_namelen = 0;
2965 msg.msg_control = NULL;
2966 msg.msg_controllen = 0;
2967 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2968
2969 if (sock == mdev->data.socket) {
2970 mdev->ko_count = mdev->net_conf->ko_count;
2971 drbd_update_congested(mdev);
2972 }
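 /* loop until the whole buffer is out: -EINTR clears the signal and retries,
 * -EAGAIN asks we_should_drop_the_connection() whether to give up */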
2973 do {
2974 /* STRANGE
2975 * tcp_sendmsg does _not_ use its size parameter at all ?
2976 *
2977 * -EAGAIN on timeout, -EINTR on signal.
2978 */
2979/* THINK
2980 * do we need to block DRBD_SIG if sock == &meta.socket ??
2981 * otherwise wake_asender() might interrupt some send_*Ack !
2982 */
2983 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2984 if (rv == -EAGAIN) {
2985 if (we_should_drop_the_connection(mdev, sock))
2986 break;
2987 else
2988 continue;
2989 }
2990 D_ASSERT(rv != 0);
2991 if (rv == -EINTR) {
2992 flush_signals(current);
2993 rv = 0;
2994 }
2995 if (rv < 0)
2996 break;
2997 sent += rv;
2998 iov.iov_base += rv;
2999 iov.iov_len -= rv;
3000 } while (sent < size);
3001
3002 if (sock == mdev->data.socket)
3003 clear_bit(NET_CONGESTED, &mdev->flags);
3004
3005 if (rv <= 0) {
3006 if (rv != -EAGAIN) {
3007 dev_err(DEV, "%s_sendmsg returned %d\n",
3008 sock == mdev->meta.socket ? "msock" : "sock",
3009 rv);
3010 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
3011 } else
3012 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
3013 }
3014
3015 return sent;
3016}
3017
3018static int drbd_open(struct block_device *bdev, fmode_t mode)
3019{
3020 struct drbd_conf *mdev = bdev->bd_disk->private_data;
3021 unsigned long flags;
3022 int rv = 0;
3023
2a48fc0a 3024 mutex_lock(&drbd_main_mutex);
b411b363
PR
3025 spin_lock_irqsave(&mdev->req_lock, flags);
3026 /* to have a stable mdev->state.role
3027 * and no race with updating open_cnt */
3028
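 /* only a Primary may be opened for writing; read-only opens of a
 * Secondary are allowed only with the allow_oos module parameter */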
3029 if (mdev->state.role != R_PRIMARY) {
3030 if (mode & FMODE_WRITE)
3031 rv = -EROFS;
3032 else if (!allow_oos)
3033 rv = -EMEDIUMTYPE;
3034 }
3035
3036 if (!rv)
3037 mdev->open_cnt++;
3038 spin_unlock_irqrestore(&mdev->req_lock, flags);
2a48fc0a 3039 mutex_unlock(&drbd_main_mutex);
b411b363
PR
3040
3041 return rv;
3042}
3043
3044static int drbd_release(struct gendisk *gd, fmode_t mode)
3045{
3046 struct drbd_conf *mdev = gd->private_data;
2a48fc0a 3047 mutex_lock(&drbd_main_mutex);
b411b363 3048 mdev->open_cnt--;
2a48fc0a 3049 mutex_unlock(&drbd_main_mutex);
b411b363
PR
3050 return 0;
3051}
3052
b411b363
PR
3053static void drbd_set_defaults(struct drbd_conf *mdev)
3054{
85f4cc17
PR
3055 /* This way we get a compile error when sync_conf grows,
 3056 and we forget to initialize it here */
3057 mdev->sync_conf = (struct syncer_conf) {
3058 /* .rate = */ DRBD_RATE_DEF,
3059 /* .after = */ DRBD_AFTER_DEF,
3060 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
85f4cc17
PR
3061 /* .verify_alg = */ {}, 0,
3062 /* .cpu_mask = */ {}, 0,
3063 /* .csums_alg = */ {}, 0,
e756414f 3064 /* .use_rle = */ 0,
9a31d716
PR
3065 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
3066 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
3067 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
3068 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
0f0601f4
LE
3069 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
3070 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
85f4cc17
PR
3071 };
3072
 3073 /* Have to do it this way, because the layout differs between
3074 big endian and little endian */
b411b363
PR
3075 mdev->state = (union drbd_state) {
3076 { .role = R_SECONDARY,
3077 .peer = R_UNKNOWN,
3078 .conn = C_STANDALONE,
3079 .disk = D_DISKLESS,
3080 .pdsk = D_UNKNOWN,
fb22c402
PR
3081 .susp = 0,
3082 .susp_nod = 0,
3083 .susp_fen = 0
b411b363
PR
3084 } };
3085}
3086
3087void drbd_init_set_defaults(struct drbd_conf *mdev)
3088{
3089 /* the memset(,0,) did most of this.
3090 * note: only assignments, no allocation in here */
3091
3092 drbd_set_defaults(mdev);
3093
b411b363
PR
3094 atomic_set(&mdev->ap_bio_cnt, 0);
3095 atomic_set(&mdev->ap_pending_cnt, 0);
3096 atomic_set(&mdev->rs_pending_cnt, 0);
3097 atomic_set(&mdev->unacked_cnt, 0);
3098 atomic_set(&mdev->local_cnt, 0);
3099 atomic_set(&mdev->net_cnt, 0);
3100 atomic_set(&mdev->packet_seq, 0);
3101 atomic_set(&mdev->pp_in_use, 0);
435f0740 3102 atomic_set(&mdev->pp_in_use_by_net, 0);
778f271d 3103 atomic_set(&mdev->rs_sect_in, 0);
0f0601f4 3104 atomic_set(&mdev->rs_sect_ev, 0);
759fbdfb 3105 atomic_set(&mdev->ap_in_flight, 0);
e1711731 3106 atomic_set(&mdev->md_io_in_use, 0);
b411b363 3107
b411b363
PR
3108 mutex_init(&mdev->data.mutex);
3109 mutex_init(&mdev->meta.mutex);
3110 sema_init(&mdev->data.work.s, 0);
3111 sema_init(&mdev->meta.work.s, 0);
3112 mutex_init(&mdev->state_mutex);
3113
3114 spin_lock_init(&mdev->data.work.q_lock);
3115 spin_lock_init(&mdev->meta.work.q_lock);
3116
3117 spin_lock_init(&mdev->al_lock);
3118 spin_lock_init(&mdev->req_lock);
3119 spin_lock_init(&mdev->peer_seq_lock);
3120 spin_lock_init(&mdev->epoch_lock);
3121
3122 INIT_LIST_HEAD(&mdev->active_ee);
3123 INIT_LIST_HEAD(&mdev->sync_ee);
3124 INIT_LIST_HEAD(&mdev->done_ee);
3125 INIT_LIST_HEAD(&mdev->read_ee);
3126 INIT_LIST_HEAD(&mdev->net_ee);
3127 INIT_LIST_HEAD(&mdev->resync_reads);
3128 INIT_LIST_HEAD(&mdev->data.work.q);
3129 INIT_LIST_HEAD(&mdev->meta.work.q);
3130 INIT_LIST_HEAD(&mdev->resync_work.list);
3131 INIT_LIST_HEAD(&mdev->unplug_work.list);
e9e6f3ec 3132 INIT_LIST_HEAD(&mdev->go_diskless.list);
b411b363 3133 INIT_LIST_HEAD(&mdev->md_sync_work.list);
c4752ef1 3134 INIT_LIST_HEAD(&mdev->start_resync_work.list);
b411b363 3135 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
0ced55a3 3136
794abb75 3137 mdev->resync_work.cb = w_resync_timer;
b411b363 3138 mdev->unplug_work.cb = w_send_write_hint;
e9e6f3ec 3139 mdev->go_diskless.cb = w_go_diskless;
b411b363
PR
3140 mdev->md_sync_work.cb = w_md_sync;
3141 mdev->bm_io_work.w.cb = w_bitmap_io;
370a43e7 3142 mdev->start_resync_work.cb = w_start_resync;
b411b363
PR
3143 init_timer(&mdev->resync_timer);
3144 init_timer(&mdev->md_sync_timer);
370a43e7 3145 init_timer(&mdev->start_resync_timer);
7fde2be9 3146 init_timer(&mdev->request_timer);
b411b363
PR
3147 mdev->resync_timer.function = resync_timer_fn;
3148 mdev->resync_timer.data = (unsigned long) mdev;
3149 mdev->md_sync_timer.function = md_sync_timer_fn;
3150 mdev->md_sync_timer.data = (unsigned long) mdev;
370a43e7
PR
3151 mdev->start_resync_timer.function = start_resync_timer_fn;
3152 mdev->start_resync_timer.data = (unsigned long) mdev;
7fde2be9
PR
3153 mdev->request_timer.function = request_timer_fn;
3154 mdev->request_timer.data = (unsigned long) mdev;
b411b363
PR
3155
3156 init_waitqueue_head(&mdev->misc_wait);
3157 init_waitqueue_head(&mdev->state_wait);
84dfb9f5 3158 init_waitqueue_head(&mdev->net_cnt_wait);
b411b363
PR
3159 init_waitqueue_head(&mdev->ee_wait);
3160 init_waitqueue_head(&mdev->al_wait);
3161 init_waitqueue_head(&mdev->seq_wait);
3162
3163 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3164 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3165 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3166
3167 mdev->agreed_pro_version = PRO_VERSION_MAX;
2451fc3b 3168 mdev->write_ordering = WO_bdev_flush;
b411b363 3169 mdev->resync_wenr = LC_FREE;
99432fcc
PR
3170 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3171 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
b411b363
PR
3172}
3173
3174void drbd_mdev_cleanup(struct drbd_conf *mdev)
3175{
1d7734a0 3176 int i;
b411b363
PR
3177 if (mdev->receiver.t_state != None)
3178 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3179 mdev->receiver.t_state);
3180
3181 /* no need to lock it, I'm the only thread alive */
3182 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3183 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3184 mdev->al_writ_cnt =
3185 mdev->bm_writ_cnt =
3186 mdev->read_cnt =
3187 mdev->recv_cnt =
3188 mdev->send_cnt =
3189 mdev->writ_cnt =
3190 mdev->p_size =
3191 mdev->rs_start =
3192 mdev->rs_total =
1d7734a0
LE
3193 mdev->rs_failed = 0;
3194 mdev->rs_last_events = 0;
0f0601f4 3195 mdev->rs_last_sect_ev = 0;
1d7734a0
LE
3196 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3197 mdev->rs_mark_left[i] = 0;
3198 mdev->rs_mark_time[i] = 0;
3199 }
b411b363
PR
3200 D_ASSERT(mdev->net_conf == NULL);
3201
3202 drbd_set_my_capacity(mdev, 0);
3203 if (mdev->bitmap) {
3204 /* maybe never allocated. */
02d9a94b 3205 drbd_bm_resize(mdev, 0, 1);
b411b363
PR
3206 drbd_bm_cleanup(mdev);
3207 }
3208
3209 drbd_free_resources(mdev);
0778286a 3210 clear_bit(AL_SUSPENDED, &mdev->flags);
b411b363
PR
3211
3212 /*
3213 * currently we drbd_init_ee only on module load, so
3214 * we may do drbd_release_ee only on module unload!
3215 */
3216 D_ASSERT(list_empty(&mdev->active_ee));
3217 D_ASSERT(list_empty(&mdev->sync_ee));
3218 D_ASSERT(list_empty(&mdev->done_ee));
3219 D_ASSERT(list_empty(&mdev->read_ee));
3220 D_ASSERT(list_empty(&mdev->net_ee));
3221 D_ASSERT(list_empty(&mdev->resync_reads));
3222 D_ASSERT(list_empty(&mdev->data.work.q));
3223 D_ASSERT(list_empty(&mdev->meta.work.q));
3224 D_ASSERT(list_empty(&mdev->resync_work.list));
3225 D_ASSERT(list_empty(&mdev->unplug_work.list));
e9e6f3ec 3226 D_ASSERT(list_empty(&mdev->go_diskless.list));
2265b473
LE
3227
3228 drbd_set_defaults(mdev);
b411b363
PR
3229}
3230
3231
3232static void drbd_destroy_mempools(void)
3233{
3234 struct page *page;
3235
3236 while (drbd_pp_pool) {
3237 page = drbd_pp_pool;
3238 drbd_pp_pool = (struct page *)page_private(page);
3239 __free_page(page);
3240 drbd_pp_vacant--;
3241 }
3242
3243 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3244
3245 if (drbd_ee_mempool)
3246 mempool_destroy(drbd_ee_mempool);
3247 if (drbd_request_mempool)
3248 mempool_destroy(drbd_request_mempool);
3249 if (drbd_ee_cache)
3250 kmem_cache_destroy(drbd_ee_cache);
3251 if (drbd_request_cache)
3252 kmem_cache_destroy(drbd_request_cache);
3253 if (drbd_bm_ext_cache)
3254 kmem_cache_destroy(drbd_bm_ext_cache);
3255 if (drbd_al_ext_cache)
3256 kmem_cache_destroy(drbd_al_ext_cache);
3257
3258 drbd_ee_mempool = NULL;
3259 drbd_request_mempool = NULL;
3260 drbd_ee_cache = NULL;
3261 drbd_request_cache = NULL;
3262 drbd_bm_ext_cache = NULL;
3263 drbd_al_ext_cache = NULL;
3264
3265 return;
3266}
3267
3268static int drbd_create_mempools(void)
3269{
3270 struct page *page;
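 /* enough pages for one maximally sized bio per configured minor */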
1816a2b4 3271 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
b411b363
PR
3272 int i;
3273
3274 /* prepare our caches and mempools */
3275 drbd_request_mempool = NULL;
3276 drbd_ee_cache = NULL;
3277 drbd_request_cache = NULL;
3278 drbd_bm_ext_cache = NULL;
3279 drbd_al_ext_cache = NULL;
3280 drbd_pp_pool = NULL;
3281
3282 /* caches */
3283 drbd_request_cache = kmem_cache_create(
3284 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3285 if (drbd_request_cache == NULL)
3286 goto Enomem;
3287
3288 drbd_ee_cache = kmem_cache_create(
3289 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3290 if (drbd_ee_cache == NULL)
3291 goto Enomem;
3292
3293 drbd_bm_ext_cache = kmem_cache_create(
3294 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3295 if (drbd_bm_ext_cache == NULL)
3296 goto Enomem;
3297
3298 drbd_al_ext_cache = kmem_cache_create(
3299 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3300 if (drbd_al_ext_cache == NULL)
3301 goto Enomem;
3302
3303 /* mempools */
3304 drbd_request_mempool = mempool_create(number,
3305 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3306 if (drbd_request_mempool == NULL)
3307 goto Enomem;
3308
3309 drbd_ee_mempool = mempool_create(number,
3310 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2027ae1f 3311 if (drbd_ee_mempool == NULL)
b411b363
PR
3312 goto Enomem;
3313
3314 /* drbd's page pool */
3315 spin_lock_init(&drbd_pp_lock);
3316
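 /* pre-allocate pages and chain them into a single linked list
 * through the page_private field */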
3317 for (i = 0; i < number; i++) {
3318 page = alloc_page(GFP_HIGHUSER);
3319 if (!page)
3320 goto Enomem;
3321 set_page_private(page, (unsigned long)drbd_pp_pool);
3322 drbd_pp_pool = page;
3323 }
3324 drbd_pp_vacant = number;
3325
3326 return 0;
3327
3328Enomem:
3329 drbd_destroy_mempools(); /* in case we allocated some */
3330 return -ENOMEM;
3331}
3332
3333static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3334 void *unused)
3335{
3336 /* just so we have it. you never know what interesting things we
3337 * might want to do here some day...
3338 */
3339
3340 return NOTIFY_DONE;
3341}
3342
3343static struct notifier_block drbd_notifier = {
3344 .notifier_call = drbd_notify_sys,
3345};
3346
3347static void drbd_release_ee_lists(struct drbd_conf *mdev)
3348{
3349 int rr;
3350
3351 rr = drbd_release_ee(mdev, &mdev->active_ee);
3352 if (rr)
3353 dev_err(DEV, "%d EEs in active list found!\n", rr);
3354
3355 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3356 if (rr)
3357 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3358
3359 rr = drbd_release_ee(mdev, &mdev->read_ee);
3360 if (rr)
3361 dev_err(DEV, "%d EEs in read list found!\n", rr);
3362
3363 rr = drbd_release_ee(mdev, &mdev->done_ee);
3364 if (rr)
3365 dev_err(DEV, "%d EEs in done list found!\n", rr);
3366
3367 rr = drbd_release_ee(mdev, &mdev->net_ee);
3368 if (rr)
3369 dev_err(DEV, "%d EEs in net list found!\n", rr);
3370}
3371
3372/* caution. no locking.
3373 * currently only used from module cleanup code. */
3374static void drbd_delete_device(unsigned int minor)
3375{
3376 struct drbd_conf *mdev = minor_to_mdev(minor);
3377
3378 if (!mdev)
3379 return;
3380
dfa8bedb
PR
3381 del_timer_sync(&mdev->request_timer);
3382
b411b363
PR
3383 /* paranoia asserts */
3384 if (mdev->open_cnt != 0)
3385 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3386 __FILE__ , __LINE__);
3387
3388 ERR_IF (!list_empty(&mdev->data.work.q)) {
3389 struct list_head *lp;
3390 list_for_each(lp, &mdev->data.work.q) {
3391 dev_err(DEV, "lp = %p\n", lp);
3392 }
3393 };
3394 /* end paranoia asserts */
3395
3396 del_gendisk(mdev->vdisk);
3397
3398 /* cleanup stuff that may have been allocated during
3399 * device (re-)configuration or state changes */
3400
3401 if (mdev->this_bdev)
3402 bdput(mdev->this_bdev);
3403
3404 drbd_free_resources(mdev);
3405
3406 drbd_release_ee_lists(mdev);
3407
24c4830c 3408 /* should be freed on disconnect? */
b411b363
PR
3409 kfree(mdev->ee_hash);
3410 /*
3411 mdev->ee_hash_s = 0;
3412 mdev->ee_hash = NULL;
3413 */
3414
3415 lc_destroy(mdev->act_log);
3416 lc_destroy(mdev->resync);
3417
3418 kfree(mdev->p_uuid);
3419 /* mdev->p_uuid = NULL; */
3420
3421 kfree(mdev->int_dig_out);
3422 kfree(mdev->int_dig_in);
3423 kfree(mdev->int_dig_vv);
3424
3425 /* cleanup the rest that has been
3426 * allocated from drbd_new_device
3427 * and actually free the mdev itself */
3428 drbd_free_mdev(mdev);
3429}
3430
3431static void drbd_cleanup(void)
3432{
3433 unsigned int i;
3434
3435 unregister_reboot_notifier(&drbd_notifier);
3436
17a93f30
LE
3437 /* first remove proc,
 3438 * drbdsetup uses its presence to detect
 3439 * whether DRBD is loaded.
 3440 * If we got stuck in proc removal,
3441 * but have netlink already deregistered,
3442 * some drbdsetup commands may wait forever
3443 * for an answer.
3444 */
3445 if (drbd_proc)
3446 remove_proc_entry("drbd", NULL);
3447
b411b363
PR
3448 drbd_nl_cleanup();
3449
3450 if (minor_table) {
b411b363
PR
3451 i = minor_count;
3452 while (i--)
3453 drbd_delete_device(i);
3454 drbd_destroy_mempools();
3455 }
3456
3457 kfree(minor_table);
3458
3459 unregister_blkdev(DRBD_MAJOR, "drbd");
3460
3461 printk(KERN_INFO "drbd: module cleanup done.\n");
3462}
3463
3464/**
3465 * drbd_congested() - Callback for pdflush
3466 * @congested_data: User data (the struct drbd_conf of this device)
3467 * @bdi_bits: Bits pdflush is currently interested in
3468 *
3469 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3470 */
3471static int drbd_congested(void *congested_data, int bdi_bits)
3472{
3473 struct drbd_conf *mdev = congested_data;
3474 struct request_queue *q;
3475 char reason = '-';
3476 int r = 0;
3477
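	/* The single-character reason ends up in mdev->congestion_reason below:
	 * 'd' = DRBD has frozen IO, 'b' = backing device congested,
	 * 'n' = network congested (NET_CONGESTED set), 'a' = both backing
	 * device and network, '-' = not congested. */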
1b881ef7 3478 if (!may_inc_ap_bio(mdev)) {
b411b363
PR
3479 /* DRBD has frozen IO */
3480 r = bdi_bits;
3481 reason = 'd';
3482 goto out;
3483 }
3484
3485 if (get_ldev(mdev)) {
3486 q = bdev_get_queue(mdev->ldev->backing_bdev);
3487 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3488 put_ldev(mdev);
3489 if (r)
3490 reason = 'b';
3491 }
3492
3493 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3494 r |= (1 << BDI_async_congested);
3495 reason = reason == 'b' ? 'a' : 'n';
3496 }
3497
3498out:
3499 mdev->congestion_reason = reason;
3500 return r;
3501}
3502
3503struct drbd_conf *drbd_new_device(unsigned int minor)
3504{
3505 struct drbd_conf *mdev;
3506 struct gendisk *disk;
3507 struct request_queue *q;
3508
3509 /* GFP_KERNEL, we are outside of all write-out paths */
3510 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3511 if (!mdev)
3512 return NULL;
3513 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3514 goto out_no_cpumask;
3515
3516 mdev->minor = minor;
3517
3518 drbd_init_set_defaults(mdev);
3519
3520 q = blk_alloc_queue(GFP_KERNEL);
3521 if (!q)
3522 goto out_no_q;
3523 mdev->rq_queue = q;
3524 q->queuedata = mdev;
b411b363
PR
3525
3526 disk = alloc_disk(1);
3527 if (!disk)
3528 goto out_no_disk;
3529 mdev->vdisk = disk;
3530
81e84650 3531 set_disk_ro(disk, true);
b411b363
PR
3532
3533 disk->queue = q;
3534 disk->major = DRBD_MAJOR;
3535 disk->first_minor = minor;
3536 disk->fops = &drbd_ops;
3537 sprintf(disk->disk_name, "drbd%d", minor);
3538 disk->private_data = mdev;
3539
3540 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3541 /* we have no partitions. we contain only ourselves. */
3542 mdev->this_bdev->bd_contains = mdev->this_bdev;
3543
3544 q->backing_dev_info.congested_fn = drbd_congested;
3545 q->backing_dev_info.congested_data = mdev;
3546
2f58dcfc 3547 blk_queue_make_request(q, drbd_make_request);
99432fcc
PR
3548	/* Setting max_hw_sectors to an odd value of 8 KiB here;
3549	   this triggers a max_bio_size message upon first attach or connect. */
3550 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
b411b363
PR
3551 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3552 blk_queue_merge_bvec(q, drbd_merge_bvec);
7eaceacc 3553 q->queue_lock = &mdev->req_lock;
b411b363
PR
3554
3555 mdev->md_io_page = alloc_page(GFP_KERNEL);
3556 if (!mdev->md_io_page)
3557 goto out_no_io_page;
3558
3559 if (drbd_bm_init(mdev))
3560 goto out_no_bitmap;
3561 /* no need to lock access, we are still initializing this minor device. */
3562 if (!tl_init(mdev))
3563 goto out_no_tl;
3564
3565 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3566 if (!mdev->app_reads_hash)
3567 goto out_no_app_reads;
3568
3569 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3570 if (!mdev->current_epoch)
3571 goto out_no_epoch;
3572
3573 INIT_LIST_HEAD(&mdev->current_epoch->list);
3574 mdev->epochs = 1;
3575
3576 return mdev;
3577
3578/* out_whatever_else:
3579 kfree(mdev->current_epoch); */
3580out_no_epoch:
3581 kfree(mdev->app_reads_hash);
3582out_no_app_reads:
3583 tl_cleanup(mdev);
3584out_no_tl:
3585 drbd_bm_cleanup(mdev);
3586out_no_bitmap:
3587 __free_page(mdev->md_io_page);
3588out_no_io_page:
3589 put_disk(disk);
3590out_no_disk:
3591 blk_cleanup_queue(q);
3592out_no_q:
3593 free_cpumask_var(mdev->cpu_mask);
3594out_no_cpumask:
3595 kfree(mdev);
3596 return NULL;
3597}
3598
3599/* counterpart of drbd_new_device.
3600 * last part of drbd_delete_device. */
3601void drbd_free_mdev(struct drbd_conf *mdev)
3602{
3603 kfree(mdev->current_epoch);
3604 kfree(mdev->app_reads_hash);
3605 tl_cleanup(mdev);
3606 if (mdev->bitmap) /* should no longer be there. */
3607 drbd_bm_cleanup(mdev);
3608 __free_page(mdev->md_io_page);
3609 put_disk(mdev->vdisk);
3610 blk_cleanup_queue(mdev->rq_queue);
3611 free_cpumask_var(mdev->cpu_mask);
3719094e 3612 drbd_free_tl_hash(mdev);
b411b363
PR
3613 kfree(mdev);
3614}
3615
3616
3617int __init drbd_init(void)
3618{
3619 int err;
3620
3621 if (sizeof(struct p_handshake) != 80) {
3622 printk(KERN_ERR
3623 "drbd: never change the size or layout "
3624 "of the HandShake packet.\n");
3625 return -EINVAL;
3626 }
3627
2b8a90b5 3628 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
b411b363
PR
3629 printk(KERN_ERR
3630 "drbd: invalid minor_count (%d)\n", minor_count);
3631#ifdef MODULE
3632 return -EINVAL;
3633#else
3634 minor_count = 8;
3635#endif
3636 }
3637
3638 err = drbd_nl_init();
3639 if (err)
3640 return err;
3641
3642 err = register_blkdev(DRBD_MAJOR, "drbd");
3643 if (err) {
3644 printk(KERN_ERR
3645 "drbd: unable to register block device major %d\n",
3646 DRBD_MAJOR);
3647 return err;
3648 }
3649
3650 register_reboot_notifier(&drbd_notifier);
3651
3652 /*
3653 * allocate all necessary structs
3654 */
3655 err = -ENOMEM;
3656
3657 init_waitqueue_head(&drbd_pp_wait);
3658
3659 drbd_proc = NULL; /* play safe for drbd_cleanup */
3660 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3661 GFP_KERNEL);
3662 if (!minor_table)
3663 goto Enomem;
3664
3665 err = drbd_create_mempools();
3666 if (err)
3667 goto Enomem;
3668
8c484ee4 3669 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
b411b363
PR
3670 if (!drbd_proc) {
3671 printk(KERN_ERR "drbd: unable to register proc file\n");
3672 goto Enomem;
3673 }
3674
3675 rwlock_init(&global_state_lock);
3676
3677 printk(KERN_INFO "drbd: initialized. "
3678 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3679 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3680 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3681 printk(KERN_INFO "drbd: registered as block device major %d\n",
3682 DRBD_MAJOR);
3683 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3684
3685 return 0; /* Success! */
3686
3687Enomem:
3688 drbd_cleanup();
3689 if (err == -ENOMEM)
3690 /* currently always the case */
3691 printk(KERN_ERR "drbd: ran out of memory\n");
3692 else
3693 printk(KERN_ERR "drbd: initialization failure\n");
3694 return err;
3695}
3696
3697void drbd_free_bc(struct drbd_backing_dev *ldev)
3698{
3699 if (ldev == NULL)
3700 return;
3701
e525fd89
TH
3702 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3703 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
b411b363
PR
3704
3705 kfree(ldev);
3706}
3707
3708void drbd_free_sock(struct drbd_conf *mdev)
3709{
3710 if (mdev->data.socket) {
4589d7f8 3711 mutex_lock(&mdev->data.mutex);
b411b363
PR
3712 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3713 sock_release(mdev->data.socket);
3714 mdev->data.socket = NULL;
4589d7f8 3715 mutex_unlock(&mdev->data.mutex);
b411b363
PR
3716 }
3717 if (mdev->meta.socket) {
4589d7f8 3718 mutex_lock(&mdev->meta.mutex);
b411b363
PR
3719 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3720 sock_release(mdev->meta.socket);
3721 mdev->meta.socket = NULL;
4589d7f8 3722 mutex_unlock(&mdev->meta.mutex);
b411b363
PR
3723 }
3724}
3725
3726
3727void drbd_free_resources(struct drbd_conf *mdev)
3728{
3729 crypto_free_hash(mdev->csums_tfm);
3730 mdev->csums_tfm = NULL;
3731 crypto_free_hash(mdev->verify_tfm);
3732 mdev->verify_tfm = NULL;
3733 crypto_free_hash(mdev->cram_hmac_tfm);
3734 mdev->cram_hmac_tfm = NULL;
3735 crypto_free_hash(mdev->integrity_w_tfm);
3736 mdev->integrity_w_tfm = NULL;
3737 crypto_free_hash(mdev->integrity_r_tfm);
3738 mdev->integrity_r_tfm = NULL;
3739
3740 drbd_free_sock(mdev);
3741
3742 __no_warn(local,
3743 drbd_free_bc(mdev->ldev);
3744 mdev->ldev = NULL;);
3745}
3746
3747/* meta data management */
3748
3749struct meta_data_on_disk {
3750 u64 la_size; /* last agreed size. */
3751 u64 uuid[UI_SIZE]; /* UUIDs. */
3752 u64 device_uuid;
3753 u64 reserved_u64_1;
3754 u32 flags; /* MDF */
3755 u32 magic;
3756 u32 md_size_sect;
3757 u32 al_offset; /* offset to this block */
3758 u32 al_nr_extents; /* important for restoring the AL */
3759 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3760 u32 bm_offset; /* offset to the bitmap, from here */
3761 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
99432fcc
PR
3762 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3763 u32 reserved_u32[3];
b411b363
PR
3764
3765} __packed;
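/* Note: this struct covers only the first part of the on-disk super block;
 * drbd_md_sync() below zeroes a full 512-byte sector buffer before filling
 * in these fields, so the rest of the sector is padding. */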
3766
3767/**
3768 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3769 * @mdev: DRBD device.
3770 */
3771void drbd_md_sync(struct drbd_conf *mdev)
3772{
3773 struct meta_data_on_disk *buffer;
3774 sector_t sector;
3775 int i;
3776
ee15b038
LE
3777 del_timer(&mdev->md_sync_timer);
3778 /* timer may be rearmed by drbd_md_mark_dirty() now. */
b411b363
PR
3779 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3780 return;
b411b363
PR
3781
3782	/* We use D_FAILED here and not D_ATTACHING because we try to write
3783 * metadata even if we detach due to a disk failure! */
3784 if (!get_ldev_if_state(mdev, D_FAILED))
3785 return;
3786
e1711731
PR
3787 buffer = drbd_md_get_buffer(mdev);
3788 if (!buffer)
3789 goto out;
3790
b411b363
PR
3791 memset(buffer, 0, 512);
3792
3793 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3794 for (i = UI_CURRENT; i < UI_SIZE; i++)
3795 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3796 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3797 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3798
3799 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3800 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3801 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3802 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3803 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3804
3805 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
99432fcc 3806 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
b411b363
PR
3807
3808 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3809 sector = mdev->ldev->md.md_offset;
3810
3f3a9b84 3811 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
b411b363
PR
3812 /* this was a try anyways ... */
3813 dev_err(DEV, "meta data update failed!\n");
81e84650 3814 drbd_chk_io_error(mdev, 1, true);
b411b363
PR
3815 }
3816
3817 /* Update mdev->ldev->md.la_size_sect,
3818 * since we updated it on metadata. */
3819 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3820
e1711731
PR
3821 drbd_md_put_buffer(mdev);
3822out:
b411b363
PR
3823 put_ldev(mdev);
3824}
3825
3826/**
3827 * drbd_md_read() - Reads in the meta data super block
3828 * @mdev: DRBD device.
3829 * @bdev: Device from which the meta data should be read in.
3830 *
116676ca 3831 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
b411b363
PR
3832 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3833 */
3834int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3835{
3836 struct meta_data_on_disk *buffer;
3837 int i, rv = NO_ERROR;
3838
3839 if (!get_ldev_if_state(mdev, D_ATTACHING))
3840 return ERR_IO_MD_DISK;
3841
e1711731
PR
3842 buffer = drbd_md_get_buffer(mdev);
3843 if (!buffer)
3844 goto out;
b411b363
PR
3845
3846 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
25985edc 3847 /* NOTE: can't do normal error processing here as this is
b411b363
PR
3848 called BEFORE disk is attached */
3849 dev_err(DEV, "Error while reading metadata.\n");
3850 rv = ERR_IO_MD_DISK;
3851 goto err;
3852 }
3853
3854 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3855 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3856 rv = ERR_MD_INVALID;
3857 goto err;
3858 }
3859 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3860 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3861 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3862 rv = ERR_MD_INVALID;
3863 goto err;
3864 }
3865 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3866 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3867 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3868 rv = ERR_MD_INVALID;
3869 goto err;
3870 }
3871 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3872 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3873 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3874 rv = ERR_MD_INVALID;
3875 goto err;
3876 }
3877
3878 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3879 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3880 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3881 rv = ERR_MD_INVALID;
3882 goto err;
3883 }
3884
3885 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3886 for (i = UI_CURRENT; i < UI_SIZE; i++)
3887 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3888 bdev->md.flags = be32_to_cpu(buffer->flags);
3889 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3890 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3891
99432fcc
PR
3892 spin_lock_irq(&mdev->req_lock);
3893 if (mdev->state.conn < C_CONNECTED) {
3894 int peer;
3895 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3896 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3897 mdev->peer_max_bio_size = peer;
3898 }
3899 spin_unlock_irq(&mdev->req_lock);
3900
b411b363
PR
3901 if (mdev->sync_conf.al_extents < 7)
3902 mdev->sync_conf.al_extents = 127;
3903
3904 err:
e1711731
PR
3905 drbd_md_put_buffer(mdev);
3906 out:
b411b363
PR
3907 put_ldev(mdev);
3908
3909 return rv;
3910}
3911
3912/**
3913 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3914 * @mdev: DRBD device.
3915 *
3916 * Call this function if you change anything that should be written to
3917 * the meta-data super block. This function sets MD_DIRTY and arms a
3918 * timer that ensures drbd_md_sync() is called within five seconds.
3919 */
ca0e6098 3920#ifdef DEBUG
ee15b038
LE
3921void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3922{
3923 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3924 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3925 mdev->last_md_mark_dirty.line = line;
3926 mdev->last_md_mark_dirty.func = func;
3927 }
3928}
3929#else
b411b363
PR
3930void drbd_md_mark_dirty(struct drbd_conf *mdev)
3931{
ee15b038 3932 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
ca0e6098 3933 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
b411b363 3934}
ee15b038 3935#endif
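/* A minimal usage sketch (illustrative only, not part of the original source):
 * a caller changes an in-core md field, marks the super block dirty, and lets
 * drbd_md_sync() write it out, either via the md_sync_timer or explicitly.
 * MDF_FULL_SYNC is used here only as an example flag; real callers go through
 * drbd_md_set_flag() further below.
 *
 *	mdev->ldev->md.flags |= MDF_FULL_SYNC;
 *	drbd_md_mark_dirty(mdev);	// arms the flush timer
 *	...
 *	drbd_md_sync(mdev);		// writes only if MD_DIRTY is still set
 */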
b411b363
PR
3936
3937static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3938{
3939 int i;
3940
62b0da3a 3941 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
b411b363 3942 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
b411b363
PR
3943}
3944
3945void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3946{
3947 if (idx == UI_CURRENT) {
3948 if (mdev->state.role == R_PRIMARY)
3949 val |= 1;
3950 else
3951 val &= ~((u64)1);
3952
3953 drbd_set_ed_uuid(mdev, val);
3954 }
3955
3956 mdev->ldev->md.uuid[idx] = val;
b411b363
PR
3957 drbd_md_mark_dirty(mdev);
3958}
3959
3960
3961void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3962{
3963 if (mdev->ldev->md.uuid[idx]) {
3964 drbd_uuid_move_history(mdev);
3965 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
b411b363
PR
3966 }
3967 _drbd_uuid_set(mdev, idx, val);
3968}
3969
3970/**
3971 * drbd_uuid_new_current() - Creates a new current UUID
3972 * @mdev: DRBD device.
3973 *
3974 * Creates a new current UUID, and rotates the old current UUID into
3975 * the bitmap slot. Causes an incremental resync upon next connect.
3976 */
3977void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3978{
3979 u64 val;
62b0da3a
LE
3980 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3981
3982 if (bm_uuid)
3983 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 3984
b411b363 3985 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
b411b363
PR
3986
3987 get_random_bytes(&val, sizeof(u64));
3988 _drbd_uuid_set(mdev, UI_CURRENT, val);
62b0da3a 3989 drbd_print_uuids(mdev, "new current UUID");
aaa8e2b3
LE
3990 /* get it to stable storage _now_ */
3991 drbd_md_sync(mdev);
b411b363
PR
3992}
3993
3994void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3995{
3996 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3997 return;
3998
3999 if (val == 0) {
4000 drbd_uuid_move_history(mdev);
4001 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
4002 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 4003 } else {
62b0da3a
LE
4004 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
4005 if (bm_uuid)
4006 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 4007
62b0da3a 4008 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
b411b363
PR
4009 }
4010 drbd_md_mark_dirty(mdev);
4011}
4012
4013/**
4014 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
4015 * @mdev: DRBD device.
4016 *
4017 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
4018 */
4019int drbd_bmio_set_n_write(struct drbd_conf *mdev)
4020{
4021 int rv = -EIO;
4022
4023 if (get_ldev_if_state(mdev, D_ATTACHING)) {
4024 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
4025 drbd_md_sync(mdev);
4026 drbd_bm_set_all(mdev);
4027
4028 rv = drbd_bm_write(mdev);
4029
4030 if (!rv) {
4031 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
4032 drbd_md_sync(mdev);
4033 }
4034
4035 put_ldev(mdev);
4036 }
4037
4038 return rv;
4039}
4040
4041/**
4042 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
4043 * @mdev: DRBD device.
4044 *
4045 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
4046 */
4047int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
4048{
4049 int rv = -EIO;
4050
0778286a 4051 drbd_resume_al(mdev);
b411b363
PR
4052 if (get_ldev_if_state(mdev, D_ATTACHING)) {
4053 drbd_bm_clear_all(mdev);
4054 rv = drbd_bm_write(mdev);
4055 put_ldev(mdev);
4056 }
4057
4058 return rv;
4059}
4060
4061static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4062{
4063 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
02851e9f 4064 int rv = -EIO;
b411b363
PR
4065
4066 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
4067
02851e9f 4068 if (get_ldev(mdev)) {
20ceb2b2 4069 drbd_bm_lock(mdev, work->why, work->flags);
02851e9f
LE
4070 rv = work->io_fn(mdev);
4071 drbd_bm_unlock(mdev);
4072 put_ldev(mdev);
4073 }
b411b363
PR
4074
4075 clear_bit(BITMAP_IO, &mdev->flags);
127b3178 4076 smp_mb__after_clear_bit();
b411b363
PR
4077 wake_up(&mdev->misc_wait);
4078
4079 if (work->done)
4080 work->done(mdev, rv);
4081
4082 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
4083 work->why = NULL;
20ceb2b2 4084 work->flags = 0;
b411b363
PR
4085
4086 return 1;
4087}
4088
82f59cc6
LE
4089void drbd_ldev_destroy(struct drbd_conf *mdev)
4090{
4091 lc_destroy(mdev->resync);
4092 mdev->resync = NULL;
4093 lc_destroy(mdev->act_log);
4094 mdev->act_log = NULL;
4095 __no_warn(local,
4096 drbd_free_bc(mdev->ldev);
4097 mdev->ldev = NULL;);
4098
4099 if (mdev->md_io_tmpp) {
4100 __free_page(mdev->md_io_tmpp);
4101 mdev->md_io_tmpp = NULL;
4102 }
4103 clear_bit(GO_DISKLESS, &mdev->flags);
4104}
4105
e9e6f3ec
LE
4106static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4107{
4108 D_ASSERT(mdev->state.disk == D_FAILED);
9d282875
LE
4109 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
4110 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
82f59cc6
LE
4111	 * the protected members anymore, though, so once the local_cnt drops
4112	 * to zero again (the final put_ldev), it will be safe to free them. */
e9e6f3ec 4113 drbd_force_state(mdev, NS(disk, D_DISKLESS));
e9e6f3ec
LE
4114 return 1;
4115}
4116
4117void drbd_go_diskless(struct drbd_conf *mdev)
4118{
4119 D_ASSERT(mdev->state.disk == D_FAILED);
4120 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
9d282875 4121 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
e9e6f3ec
LE
4122}
4123
b411b363
PR
4124/**
4125 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
4126 * @mdev: DRBD device.
4127 * @io_fn: IO callback to be called when bitmap IO is possible
4128 * @done: callback to be called after the bitmap IO was performed
4129 * @why: Descriptive text of the reason for doing the IO
4130 *
4131 * While IO on the bitmap is in progress we freeze application IO, which
4132 * ensures that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
4133 * called from worker context. It MUST NOT be used while a previous such
4134 * work is still pending!
4135 */
4136void drbd_queue_bitmap_io(struct drbd_conf *mdev,
4137 int (*io_fn)(struct drbd_conf *),
4138 void (*done)(struct drbd_conf *, int),
20ceb2b2 4139 char *why, enum bm_flag flags)
b411b363
PR
4140{
4141 D_ASSERT(current == mdev->worker.task);
4142
4143 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4144 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4145 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4146 if (mdev->bm_io_work.why)
4147 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4148 why, mdev->bm_io_work.why);
4149
4150 mdev->bm_io_work.io_fn = io_fn;
4151 mdev->bm_io_work.done = done;
4152 mdev->bm_io_work.why = why;
20ceb2b2 4153 mdev->bm_io_work.flags = flags;
b411b363 4154
22afd7ee 4155 spin_lock_irq(&mdev->req_lock);
b411b363
PR
4156 set_bit(BITMAP_IO, &mdev->flags);
4157 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
127b3178 4158 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
b411b363 4159 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
b411b363 4160 }
22afd7ee 4161 spin_unlock_irq(&mdev->req_lock);
b411b363
PR
4162}
4163
4164/**
4165 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4166 * @mdev: DRBD device.
4167 * @io_fn: IO callback to be called when bitmap IO is possible
4168 * @why: Descriptive text of the reason for doing the IO
4169 *
4170 * Freezes application IO while the actual IO operation runs. This
4171 * function MAY NOT be called from worker context.
4172 */
20ceb2b2
LE
4173int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4174 char *why, enum bm_flag flags)
b411b363
PR
4175{
4176 int rv;
4177
4178 D_ASSERT(current != mdev->worker.task);
4179
20ceb2b2
LE
4180 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4181 drbd_suspend_io(mdev);
b411b363 4182
20ceb2b2 4183 drbd_bm_lock(mdev, why, flags);
b411b363
PR
4184 rv = io_fn(mdev);
4185 drbd_bm_unlock(mdev);
4186
20ceb2b2
LE
4187 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4188 drbd_resume_io(mdev);
b411b363
PR
4189
4190 return rv;
4191}
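/* Illustrative sketch (not from the original file): the io_fn helpers defined
 * earlier plug into either entry point. The "why" strings and the bm_flag
 * value below are assumptions made purely for the example.
 *
 * From non-worker context, synchronously:
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *			    "example: full sync", BM_LOCKED_SET_ALLOWED);
 *
 * From worker context, queued, with the completion callback left out:
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_clear_n_write, NULL,
 *			     "example: clear bitmap", BM_LOCKED_SET_ALLOWED);
 */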
4192
4193void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4194{
4195 if ((mdev->ldev->md.flags & flag) != flag) {
4196 drbd_md_mark_dirty(mdev);
4197 mdev->ldev->md.flags |= flag;
4198 }
4199}
4200
4201void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4202{
4203 if ((mdev->ldev->md.flags & flag) != 0) {
4204 drbd_md_mark_dirty(mdev);
4205 mdev->ldev->md.flags &= ~flag;
4206 }
4207}
4208int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4209{
4210 return (bdev->md.flags & flag) != 0;
4211}
4212
4213static void md_sync_timer_fn(unsigned long data)
4214{
4215 struct drbd_conf *mdev = (struct drbd_conf *) data;
4216
4217 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4218}
4219
4220static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4221{
4222 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
ee15b038
LE
4223#ifdef DEBUG
4224 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4225 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4226#endif
b411b363 4227 drbd_md_sync(mdev);
b411b363
PR
4228 return 1;
4229}
4230
4231#ifdef CONFIG_DRBD_FAULT_INJECTION
4232/* Fault insertion support including random number generator shamelessly
4233 * stolen from kernel/rcutorture.c */
4234struct fault_random_state {
4235 unsigned long state;
4236 unsigned long count;
4237};
4238
4239#define FAULT_RANDOM_MULT 39916801 /* prime */
4240#define FAULT_RANDOM_ADD 479001701 /* prime */
4241#define FAULT_RANDOM_REFRESH 10000
4242
4243/*
4244 * Crude but fast random-number generator. Uses a linear congruential
4245 * generator, with occasional help from get_random_bytes().
4246 */
4247static unsigned long
4248_drbd_fault_random(struct fault_random_state *rsp)
4249{
4250 long refresh;
4251
49829ea7 4252 if (!rsp->count--) {
b411b363
PR
4253 get_random_bytes(&refresh, sizeof(refresh));
4254 rsp->state += refresh;
4255 rsp->count = FAULT_RANDOM_REFRESH;
4256 }
4257 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4258 return swahw32(rsp->state);
4259}
4260
4261static char *
4262_drbd_fault_str(unsigned int type) {
4263 static char *_faults[] = {
4264 [DRBD_FAULT_MD_WR] = "Meta-data write",
4265 [DRBD_FAULT_MD_RD] = "Meta-data read",
4266 [DRBD_FAULT_RS_WR] = "Resync write",
4267 [DRBD_FAULT_RS_RD] = "Resync read",
4268 [DRBD_FAULT_DT_WR] = "Data write",
4269 [DRBD_FAULT_DT_RD] = "Data read",
4270 [DRBD_FAULT_DT_RA] = "Data read ahead",
4271 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
6b4388ac
PR
4272 [DRBD_FAULT_AL_EE] = "EE allocation",
4273 [DRBD_FAULT_RECEIVE] = "receive data corruption",
b411b363
PR
4274 };
4275
4276 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4277}
4278
4279unsigned int
4280_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4281{
4282 static struct fault_random_state rrs = {0, 0};
4283
4284 unsigned int ret = (
4285 (fault_devs == 0 ||
4286 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4287 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4288
4289 if (ret) {
4290 fault_count++;
4291
7383506c 4292 if (__ratelimit(&drbd_ratelimit_state))
b411b363
PR
4293 dev_warn(DEV, "***Simulating %s failure\n",
4294 _drbd_fault_str(type));
4295 }
4296
4297 return ret;
4298}
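/* In short: fault_rate acts as a percentage probability per IO request,
 * fault_devs (when non-zero) is a bitmask selecting which minors may fault,
 * and fault_count counts the faults actually injected. These are exposed as
 * DRBD module parameters when CONFIG_DRBD_FAULT_INJECTION is enabled. */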
4299#endif
4300
4301const char *drbd_buildtag(void)
4302{
4303	/* DRBD built from external sources carries a reference to the
4304	   git hash of the source code here. */
4305
4306 static char buildtag[38] = "\0uilt-in";
4307
4308 if (buildtag[0] == 0) {
4309#ifdef CONFIG_MODULES
4310 if (THIS_MODULE != NULL)
4311 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4312 else
4313#endif
4314 buildtag[0] = 'b';
4315 }
4316
4317 return buildtag;
4318}
4319
4320module_init(drbd_init)
4321module_exit(drbd_cleanup)
4322
b411b363
PR
4323EXPORT_SYMBOL(drbd_conn_str);
4324EXPORT_SYMBOL(drbd_role_str);
4325EXPORT_SYMBOL(drbd_disk_str);
4326EXPORT_SYMBOL(drbd_set_st_err_str);