drivers/block/drbd/drbd_main.c
1/*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
29#include <linux/module.h>
30#include <linux/drbd.h>
31#include <asm/uaccess.h>
32#include <asm/types.h>
33#include <net/sock.h>
34#include <linux/ctype.h>
35#include <linux/mutex.h>
36#include <linux/fs.h>
37#include <linux/file.h>
38#include <linux/proc_fs.h>
39#include <linux/init.h>
40#include <linux/mm.h>
41#include <linux/memcontrol.h>
42#include <linux/mm_inline.h>
43#include <linux/slab.h>
44#include <linux/random.h>
45#include <linux/reboot.h>
46#include <linux/notifier.h>
47#include <linux/kthread.h>
48
49#define __KERNEL_SYSCALLS__
50#include <linux/unistd.h>
51#include <linux/vmalloc.h>
52
53#include <linux/drbd_limits.h>
54#include "drbd_int.h"
55#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57#include "drbd_vli.h"
58
59struct after_state_chg_work {
60 struct drbd_work w;
61 union drbd_state os;
62 union drbd_state ns;
63 enum chg_state_flags flags;
64 struct completion *done;
65};
66
67static DEFINE_MUTEX(drbd_main_mutex);
68int drbdd_init(struct drbd_thread *);
69int drbd_worker(struct drbd_thread *);
70int drbd_asender(struct drbd_thread *);
71
72int drbd_init(void);
73static int drbd_open(struct block_device *bdev, fmode_t mode);
74static int drbd_release(struct gendisk *gd, fmode_t mode);
75static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77 union drbd_state ns, enum chg_state_flags flags);
78static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
82
83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>");
85MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86MODULE_VERSION(REL_VERSION);
87MODULE_LICENSE("GPL");
88MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92#include <linux/moduleparam.h>
93/* allow_open_on_secondary */
94MODULE_PARM_DESC(allow_oos, "DONT USE!");
95/* thanks to these macros, if compiled into the kernel (not-module),
96 * this becomes the boot parameter drbd.minor_count */
97module_param(minor_count, uint, 0444);
98module_param(disable_sendpage, bool, 0644);
99module_param(allow_oos, bool, 0);
100module_param(cn_idx, uint, 0444);
101module_param(proc_details, int, 0644);
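/* Illustrative usage note (not from the original source; values are made up):
 * when drbd is loaded as a module, the parameters above are given at load
 * time, e.g.
 *     modprobe drbd minor_count=8 usermode_helper=/sbin/drbdadm
 * and when built into the kernel they become boot parameters carrying the
 * "drbd." prefix, e.g. drbd.minor_count=8 on the kernel command line. */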
102
103#ifdef CONFIG_DRBD_FAULT_INJECTION
104int enable_faults;
105int fault_rate;
106static int fault_count;
107int fault_devs;
108/* bitmap of enabled faults */
109module_param(enable_faults, int, 0664);
110/* fault rate % value - applies to all enabled faults */
111module_param(fault_rate, int, 0664);
112/* count of faults inserted */
113module_param(fault_count, int, 0664);
114/* bitmap of devices to insert faults on */
115module_param(fault_devs, int, 0644);
116#endif
117
118/* module parameter, defined */
119unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120int disable_sendpage;
121int allow_oos;
122unsigned int cn_idx = CN_IDX_DRBD;
123int proc_details; /* Detail level in proc drbd*/
124
125/* Module parameter for setting the user mode helper program
126 * to run. Default is /sbin/drbdadm */
127char usermode_helper[80] = "/sbin/drbdadm";
128
129module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131/* in 2.6.x, our device mapping and config info contains our virtual gendisks
132 * as member "struct gendisk *vdisk;"
133 */
134struct drbd_conf **minor_table;
135
136struct kmem_cache *drbd_request_cache;
137struct kmem_cache *drbd_ee_cache; /* epoch entries */
138struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
139struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
140mempool_t *drbd_request_mempool;
141mempool_t *drbd_ee_mempool;
142
143/* I do not use a standard mempool, because:
144 1) I want to hand out the pre-allocated objects first.
145 2) I want to be able to interrupt sleeping allocation with a signal.
146 Note: This is a single linked list, the next pointer is the private
147 member of struct page.
148 */
149struct page *drbd_pp_pool;
150spinlock_t drbd_pp_lock;
151int drbd_pp_vacant;
152wait_queue_head_t drbd_pp_wait;
153
154DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
156static const struct block_device_operations drbd_ops = {
157 .owner = THIS_MODULE,
158 .open = drbd_open,
159 .release = drbd_release,
160};
161
162#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164#ifdef __CHECKER__
165/* When checking with sparse, and this is an inline function, sparse will
166 give tons of false positives. When this is a real function, sparse works.
167 */
168int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169{
170 int io_allowed;
171
172 atomic_inc(&mdev->local_cnt);
173 io_allowed = (mdev->state.disk >= mins);
174 if (!io_allowed) {
175 if (atomic_dec_and_test(&mdev->local_cnt))
176 wake_up(&mdev->misc_wait);
177 }
178 return io_allowed;
179}
180
181#endif
182
183/**
184 * DOC: The transfer log
185 *
186 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
187 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188 * of the list. There is always at least one &struct drbd_tl_epoch object.
189 *
190 * Each &struct drbd_tl_epoch has a circular double linked list of requests
191 * attached.
192 */
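/* Illustrative sketch (compiled out, not part of the driver): how the
 * transfer log described above can be walked. The helper name is made up;
 * it assumes req_lock is held and uses only fields referenced elsewhere in
 * this file (oldest_tle, next, requests, tl_requests, br_number). */
#if 0
static void tl_walk_example(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;
	struct drbd_request *req;

	/* epochs form a single linked list from oldest_tle to newest_tle */
	for (b = mdev->oldest_tle; b; b = b->next) {
		/* each epoch carries a circular double linked list of requests */
		list_for_each_entry(req, &b->requests, tl_requests)
			dev_info(DEV, "barrier #%u: req %p\n", b->br_number, req);
	}
}
#endif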
193static int tl_init(struct drbd_conf *mdev)
194{
195 struct drbd_tl_epoch *b;
196
197 /* during device minor initialization, we may well use GFP_KERNEL */
198 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199 if (!b)
200 return 0;
201 INIT_LIST_HEAD(&b->requests);
202 INIT_LIST_HEAD(&b->w.list);
203 b->next = NULL;
204 b->br_number = 4711;
205 b->n_writes = 0;
206 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208 mdev->oldest_tle = b;
209 mdev->newest_tle = b;
210 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211
212 return 1;
213}
214
215static void tl_cleanup(struct drbd_conf *mdev)
216{
217 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
218 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
219 kfree(mdev->oldest_tle);
220 mdev->oldest_tle = NULL;
221 kfree(mdev->unused_spare_tle);
222 mdev->unused_spare_tle = NULL;
223}
224
225/**
226 * _tl_add_barrier() - Adds a barrier to the transfer log
227 * @mdev: DRBD device.
228 * @new: Barrier to be added before the current head of the TL.
229 *
230 * The caller must hold the req_lock.
231 */
232void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
233{
234 struct drbd_tl_epoch *newest_before;
235
236 INIT_LIST_HEAD(&new->requests);
237 INIT_LIST_HEAD(&new->w.list);
238 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
239 new->next = NULL;
240 new->n_writes = 0;
241
242 newest_before = mdev->newest_tle;
243 /* never send a barrier number == 0, because that is special-cased
244 * when using TCQ for our write ordering code */
245 new->br_number = (newest_before->br_number+1) ?: 1;
246 if (mdev->newest_tle != new) {
247 mdev->newest_tle->next = new;
248 mdev->newest_tle = new;
249 }
250}
251
252/**
253 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
254 * @mdev: DRBD device.
255 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
256 * @set_size: Expected number of requests before that barrier.
257 *
258 * In case the passed barrier_nr or set_size does not match the oldest
259 * &struct drbd_tl_epoch objects this function will cause a termination
260 * of the connection.
261 */
262void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
263 unsigned int set_size)
264{
265 struct drbd_tl_epoch *b, *nob; /* next old barrier */
266 struct list_head *le, *tle;
267 struct drbd_request *r;
268
269 spin_lock_irq(&mdev->req_lock);
270
271 b = mdev->oldest_tle;
272
273 /* first some paranoia code */
274 if (b == NULL) {
275 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
276 barrier_nr);
277 goto bail;
278 }
279 if (b->br_number != barrier_nr) {
280 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
281 barrier_nr, b->br_number);
282 goto bail;
283 }
284 if (b->n_writes != set_size) {
285 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
286 barrier_nr, set_size, b->n_writes);
287 goto bail;
288 }
289
290 /* Clean up list of requests processed during current epoch */
291 list_for_each_safe(le, tle, &b->requests) {
292 r = list_entry(le, struct drbd_request, tl_requests);
293 _req_mod(r, BARRIER_ACKED);
294 }
295 /* There could be requests on the list waiting for completion
296 of the write to the local disk. To avoid corruptions of
297 slab's data structures we have to remove the list's head.
298
299 Also there could have been a barrier ack out of sequence, overtaking
300 the write acks - which would be a bug and violating write ordering.
301 To not deadlock in case we lose connection while such requests are
302 still pending, we need some way to find them for the
303 _req_mod(CONNECTION_LOST_WHILE_PENDING).
304
305 These have been list_move'd to the out_of_sequence_requests list in
306 _req_mod(, BARRIER_ACKED) above.
307 */
308 list_del_init(&b->requests);
309
310 nob = b->next;
311 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
312 _tl_add_barrier(mdev, b);
313 if (nob)
314 mdev->oldest_tle = nob;
315 /* if nob == NULL b was the only barrier, and becomes the new
316 barrier. Therefore mdev->oldest_tle points already to b */
317 } else {
318 D_ASSERT(nob != NULL);
319 mdev->oldest_tle = nob;
320 kfree(b);
321 }
322
323 spin_unlock_irq(&mdev->req_lock);
324 dec_ap_pending(mdev);
325
326 return;
327
328bail:
329 spin_unlock_irq(&mdev->req_lock);
330 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
331}
332
333
334/**
335 * _tl_restart() - Walks the transfer log, and applies an action to all requests
336 * @mdev: DRBD device.
337 * @what: The action/event to perform with all request objects
338 *
339 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
340 * RESTART_FROZEN_DISK_IO.
341 */
342static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
343{
344 struct drbd_tl_epoch *b, *tmp, **pn;
345 struct list_head *le, *tle, carry_reads;
346 struct drbd_request *req;
347 int rv, n_writes, n_reads;
348
349 b = mdev->oldest_tle;
350 pn = &mdev->oldest_tle;
351 while (b) {
352 n_writes = 0;
353 n_reads = 0;
354 INIT_LIST_HEAD(&carry_reads);
355 list_for_each_safe(le, tle, &b->requests) {
356 req = list_entry(le, struct drbd_request, tl_requests);
357 rv = _req_mod(req, what);
358
359 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
360 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
361 }
362 tmp = b->next;
363
364 if (n_writes) {
365 if (what == RESEND) {
366 b->n_writes = n_writes;
367 if (b->w.cb == NULL) {
368 b->w.cb = w_send_barrier;
369 inc_ap_pending(mdev);
370 set_bit(CREATE_BARRIER, &mdev->flags);
371 }
372
373 drbd_queue_work(&mdev->data.work, &b->w);
374 }
375 pn = &b->next;
376 } else {
377 if (n_reads)
378 list_add(&carry_reads, &b->requests);
379 /* there could still be requests on that ring list,
380 * in case local io is still pending */
381 list_del(&b->requests);
382
383 /* dec_ap_pending corresponding to queue_barrier.
384 * the newest barrier may not have been queued yet,
385 * in which case w.cb is still NULL. */
386 if (b->w.cb != NULL)
387 dec_ap_pending(mdev);
388
389 if (b == mdev->newest_tle) {
390 /* recycle, but reinit! */
391 D_ASSERT(tmp == NULL);
392 INIT_LIST_HEAD(&b->requests);
393 list_splice(&carry_reads, &b->requests);
394 INIT_LIST_HEAD(&b->w.list);
395 b->w.cb = NULL;
396 b->br_number = net_random();
397 b->n_writes = 0;
398
399 *pn = b;
400 break;
401 }
402 *pn = tmp;
403 kfree(b);
404 }
405 b = tmp;
406 list_splice(&carry_reads, &b->requests);
407 }
408}
409
410
411/**
412 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
413 * @mdev: DRBD device.
414 *
415 * This is called after the connection to the peer was lost. The storage covered
416 by the requests on the transfer log gets marked as out of sync. Called from the
417 * receiver thread and the worker thread.
418 */
419void tl_clear(struct drbd_conf *mdev)
420{
421 struct list_head *le, *tle;
422 struct drbd_request *r;
423
424 spin_lock_irq(&mdev->req_lock);
425
426 _tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);
427
428 /* we expect this list to be empty. */
429 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
430
431 /* but just in case, clean it up anyways! */
432 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
433 r = list_entry(le, struct drbd_request, tl_requests);
434 /* It would be nice to complete outside of spinlock.
435 * But this is easier for now. */
436 _req_mod(r, CONNECTION_LOST_WHILE_PENDING);
437 }
438
439 /* ensure bit indicating barrier is required is clear */
440 clear_bit(CREATE_BARRIER, &mdev->flags);
441
442 spin_unlock_irq(&mdev->req_lock);
443}
444
445void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
446{
447 spin_lock_irq(&mdev->req_lock);
448 _tl_restart(mdev, what);
449 spin_unlock_irq(&mdev->req_lock);
450}
451
452/**
453 * cl_wide_st_chg() - true if the state change is a cluster wide one
454 * @mdev: DRBD device.
455 * @os: old (current) state.
456 * @ns: new (wanted) state.
457 */
458static int cl_wide_st_chg(struct drbd_conf *mdev,
459 union drbd_state os, union drbd_state ns)
460{
461 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
462 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
463 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
464 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
465 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
466 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
467 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
468}
469
470enum drbd_state_rv
471drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
472 union drbd_state mask, union drbd_state val)
473{
474 unsigned long flags;
475 union drbd_state os, ns;
476 enum drbd_state_rv rv;
477
478 spin_lock_irqsave(&mdev->req_lock, flags);
479 os = mdev->state;
480 ns.i = (os.i & ~mask.i) | val.i;
481 rv = _drbd_set_state(mdev, ns, f, NULL);
482 ns = mdev->state;
483 spin_unlock_irqrestore(&mdev->req_lock, flags);
484
485 return rv;
486}
487
488/**
489 * drbd_force_state() - Impose a change which happens outside our control on our state
490 * @mdev: DRBD device.
491 * @mask: mask of state bits to change.
492 * @val: value of new state bits.
493 */
494void drbd_force_state(struct drbd_conf *mdev,
495 union drbd_state mask, union drbd_state val)
496{
497 drbd_change_state(mdev, CS_HARD, mask, val);
498}
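/* Illustrative sketch (compiled out, helper name made up): how callers drive
 * the state engine. NS() from drbd_int.h builds the mask/val pair for one
 * state field; _drbd_request_state() goes through the validation implemented
 * below, while drbd_force_state() passes CS_HARD and skips those checks. */
#if 0
static void state_change_example(struct drbd_conf *mdev)
{
	/* request a (possibly cluster wide) connection state change */
	_drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_VERBOSE);

	/* impose a change that happened outside our control */
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}
#endif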
499
500static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
501static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
502 union drbd_state,
503 union drbd_state);
504static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
505 union drbd_state ns, const char **warn_sync_abort);
506int drbd_send_state_req(struct drbd_conf *,
507 union drbd_state, union drbd_state);
508
509static enum drbd_state_rv
510_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
511 union drbd_state val)
512{
513 union drbd_state os, ns;
514 unsigned long flags;
515 enum drbd_state_rv rv;
516
517 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
518 return SS_CW_SUCCESS;
519
520 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
521 return SS_CW_FAILED_BY_PEER;
522
523 rv = 0;
524 spin_lock_irqsave(&mdev->req_lock, flags);
525 os = mdev->state;
526 ns.i = (os.i & ~mask.i) | val.i;
527 ns = sanitize_state(mdev, os, ns, NULL);
528
529 if (!cl_wide_st_chg(mdev, os, ns))
530 rv = SS_CW_NO_NEED;
531 if (!rv) {
532 rv = is_valid_state(mdev, ns);
533 if (rv == SS_SUCCESS) {
534 rv = is_valid_state_transition(mdev, ns, os);
535 if (rv == SS_SUCCESS)
536 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
537 }
538 }
539 spin_unlock_irqrestore(&mdev->req_lock, flags);
540
541 return rv;
542}
543
544/**
545 * drbd_req_state() - Perform an eventually cluster wide state change
546 * @mdev: DRBD device.
547 * @mask: mask of state bits to change.
548 * @val: value of new state bits.
549 * @f: flags
550 *
551 * Should not be called directly, use drbd_request_state() or
552 * _drbd_request_state().
553 */
554static enum drbd_state_rv
555drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
556 union drbd_state val, enum chg_state_flags f)
557{
558 struct completion done;
559 unsigned long flags;
560 union drbd_state os, ns;
561 enum drbd_state_rv rv;
562
563 init_completion(&done);
564
565 if (f & CS_SERIALIZE)
566 mutex_lock(&mdev->state_mutex);
567
568 spin_lock_irqsave(&mdev->req_lock, flags);
569 os = mdev->state;
570 ns.i = (os.i & ~mask.i) | val.i;
571 ns = sanitize_state(mdev, os, ns, NULL);
572
573 if (cl_wide_st_chg(mdev, os, ns)) {
574 rv = is_valid_state(mdev, ns);
575 if (rv == SS_SUCCESS)
576 rv = is_valid_state_transition(mdev, ns, os);
577 spin_unlock_irqrestore(&mdev->req_lock, flags);
578
579 if (rv < SS_SUCCESS) {
580 if (f & CS_VERBOSE)
581 print_st_err(mdev, os, ns, rv);
582 goto abort;
583 }
584
585 drbd_state_lock(mdev);
586 if (!drbd_send_state_req(mdev, mask, val)) {
587 drbd_state_unlock(mdev);
588 rv = SS_CW_FAILED_BY_PEER;
589 if (f & CS_VERBOSE)
590 print_st_err(mdev, os, ns, rv);
591 goto abort;
592 }
593
594 wait_event(mdev->state_wait,
595 (rv = _req_st_cond(mdev, mask, val)));
596
597 if (rv < SS_SUCCESS) {
598 drbd_state_unlock(mdev);
599 if (f & CS_VERBOSE)
600 print_st_err(mdev, os, ns, rv);
601 goto abort;
602 }
603 spin_lock_irqsave(&mdev->req_lock, flags);
604 os = mdev->state;
605 ns.i = (os.i & ~mask.i) | val.i;
606 rv = _drbd_set_state(mdev, ns, f, &done);
607 drbd_state_unlock(mdev);
608 } else {
609 rv = _drbd_set_state(mdev, ns, f, &done);
610 }
611
612 spin_unlock_irqrestore(&mdev->req_lock, flags);
613
614 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
615 D_ASSERT(current != mdev->worker.task);
616 wait_for_completion(&done);
617 }
618
619abort:
620 if (f & CS_SERIALIZE)
621 mutex_unlock(&mdev->state_mutex);
622
623 return rv;
624}
625
626/**
627 * _drbd_request_state() - Request a state change (with flags)
628 * @mdev: DRBD device.
629 * @mask: mask of state bits to change.
630 * @val: value of new state bits.
631 * @f: flags
632 *
633 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
634 * flag, or when logging of failed state change requests is not desired.
635 */
636enum drbd_state_rv
637_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
638 union drbd_state val, enum chg_state_flags f)
639{
640 enum drbd_state_rv rv;
641
642 wait_event(mdev->state_wait,
643 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
644
645 return rv;
646}
647
648static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
649{
650 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
651 name,
652 drbd_conn_str(ns.conn),
653 drbd_role_str(ns.role),
654 drbd_role_str(ns.peer),
655 drbd_disk_str(ns.disk),
656 drbd_disk_str(ns.pdsk),
657 is_susp(ns) ? 's' : 'r',
658 ns.aftr_isp ? 'a' : '-',
659 ns.peer_isp ? 'p' : '-',
660 ns.user_isp ? 'u' : '-'
661 );
662}
663
664void print_st_err(struct drbd_conf *mdev, union drbd_state os,
665 union drbd_state ns, enum drbd_state_rv err)
666{
667 if (err == SS_IN_TRANSIENT_STATE)
668 return;
669 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
670 print_st(mdev, " state", os);
671 print_st(mdev, "wanted", ns);
672}
673
674
675/**
676 * is_valid_state() - Returns an SS_ error code if ns is not valid
677 * @mdev: DRBD device.
678 * @ns: State to consider.
679 */
680static enum drbd_state_rv
681is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
682{
683 /* See drbd_state_sw_errors in drbd_strings.c */
684
685 enum drbd_fencing_p fp;
686 enum drbd_state_rv rv = SS_SUCCESS;
687
688 fp = FP_DONT_CARE;
689 if (get_ldev(mdev)) {
690 fp = mdev->ldev->dc.fencing;
691 put_ldev(mdev);
692 }
693
694 if (get_net_conf(mdev)) {
695 if (!mdev->net_conf->two_primaries &&
696 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
697 rv = SS_TWO_PRIMARIES;
698 put_net_conf(mdev);
699 }
700
701 if (rv <= 0)
702 /* already found a reason to abort */;
703 else if (ns.role == R_SECONDARY && mdev->open_cnt)
704 rv = SS_DEVICE_IN_USE;
705
706 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
707 rv = SS_NO_UP_TO_DATE_DISK;
708
709 else if (fp >= FP_RESOURCE &&
710 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
711 rv = SS_PRIMARY_NOP;
712
713 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
714 rv = SS_NO_UP_TO_DATE_DISK;
715
716 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
717 rv = SS_NO_LOCAL_DISK;
718
719 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
720 rv = SS_NO_REMOTE_DISK;
721
722 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
723 rv = SS_NO_UP_TO_DATE_DISK;
724
725 else if ((ns.conn == C_CONNECTED ||
726 ns.conn == C_WF_BITMAP_S ||
727 ns.conn == C_SYNC_SOURCE ||
728 ns.conn == C_PAUSED_SYNC_S) &&
729 ns.disk == D_OUTDATED)
730 rv = SS_CONNECTED_OUTDATES;
731
732 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
733 (mdev->sync_conf.verify_alg[0] == 0))
734 rv = SS_NO_VERIFY_ALG;
735
736 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
737 mdev->agreed_pro_version < 88)
738 rv = SS_NOT_SUPPORTED;
739
740 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
741 rv = SS_CONNECTED_OUTDATES;
742
743 return rv;
744}
745
746/**
747 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
748 * @mdev: DRBD device.
749 * @ns: new state.
750 * @os: old state.
751 */
752static enum drbd_state_rv
753is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
754 union drbd_state os)
755{
756 enum drbd_state_rv rv = SS_SUCCESS;
757
758 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
759 os.conn > C_CONNECTED)
760 rv = SS_RESYNC_RUNNING;
761
762 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
763 rv = SS_ALREADY_STANDALONE;
764
765 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
766 rv = SS_IS_DISKLESS;
767
768 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
769 rv = SS_NO_NET_CONFIG;
770
771 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
772 rv = SS_LOWER_THAN_OUTDATED;
773
774 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
775 rv = SS_IN_TRANSIENT_STATE;
776
777 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
778 rv = SS_IN_TRANSIENT_STATE;
779
780 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
781 rv = SS_NEED_CONNECTION;
782
783 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
784 ns.conn != os.conn && os.conn > C_CONNECTED)
785 rv = SS_RESYNC_RUNNING;
786
787 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
788 os.conn < C_CONNECTED)
789 rv = SS_NEED_CONNECTION;
790
791 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
792 && os.conn < C_WF_REPORT_PARAMS)
793 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
794
795 return rv;
796}
797
798/**
799 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
800 * @mdev: DRBD device.
801 * @os: old state.
802 * @ns: new state.
803 * @warn_sync_abort:
804 *
805 * When we lose connection, we have to set the state of the peer's disk (pdsk)
806 * to D_UNKNOWN. This rule and many more along those lines are in this function.
807 */
808static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
02bc7174 809 union drbd_state ns, const char **warn_sync_abort)
b411b363
PR
810{
811 enum drbd_fencing_p fp;
812 enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
813
814 fp = FP_DONT_CARE;
815 if (get_ldev(mdev)) {
816 fp = mdev->ldev->dc.fencing;
817 put_ldev(mdev);
818 }
819
820 /* Disallow Network errors to configure a device's network part */
821 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
822 os.conn <= C_DISCONNECTING)
823 ns.conn = os.conn;
824
825 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
826 * If you try to go into some Sync* state, that shall fail (elsewhere). */
827 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
828 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
829 ns.conn = os.conn;
830
831 /* we cannot fail (again) if we already detached */
832 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
833 ns.disk = D_DISKLESS;
834
835 /* if we are only D_ATTACHING yet,
836 * we can (and should) go directly to D_DISKLESS. */
837 if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
838 ns.disk = D_DISKLESS;
839
840 /* After C_DISCONNECTING only C_STANDALONE may follow */
841 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
842 ns.conn = os.conn;
843
844 if (ns.conn < C_CONNECTED) {
845 ns.peer_isp = 0;
846 ns.peer = R_UNKNOWN;
847 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
848 ns.pdsk = D_UNKNOWN;
849 }
850
851 /* Clear the aftr_isp when becoming unconfigured */
852 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
853 ns.aftr_isp = 0;
854
855 /* Abort resync if a disk fails/detaches */
856 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
857 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
858 if (warn_sync_abort)
859 *warn_sync_abort =
860 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
861 "Online-verify" : "Resync";
862 ns.conn = C_CONNECTED;
863 }
864
865 /* Connection breaks down before we finished "Negotiating" */
866 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
867 get_ldev_if_state(mdev, D_NEGOTIATING)) {
868 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
869 ns.disk = mdev->new_state_tmp.disk;
870 ns.pdsk = mdev->new_state_tmp.pdsk;
871 } else {
872 dev_alert(DEV, "Connection lost while negotiating, no data!\n");
873 ns.disk = D_DISKLESS;
874 ns.pdsk = D_UNKNOWN;
875 }
876 put_ldev(mdev);
877 }
878
879 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
880 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
881 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
882 ns.disk = D_UP_TO_DATE;
883 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
884 ns.pdsk = D_UP_TO_DATE;
885 }
886
887 /* Implications of the connection state on the disk states */
888 disk_min = D_DISKLESS;
889 disk_max = D_UP_TO_DATE;
890 pdsk_min = D_INCONSISTENT;
891 pdsk_max = D_UNKNOWN;
892 switch ((enum drbd_conns)ns.conn) {
893 case C_WF_BITMAP_T:
894 case C_PAUSED_SYNC_T:
895 case C_STARTING_SYNC_T:
896 case C_WF_SYNC_UUID:
897 case C_BEHIND:
898 disk_min = D_INCONSISTENT;
899 disk_max = D_OUTDATED;
900 pdsk_min = D_UP_TO_DATE;
901 pdsk_max = D_UP_TO_DATE;
902 break;
903 case C_VERIFY_S:
904 case C_VERIFY_T:
905 disk_min = D_UP_TO_DATE;
906 disk_max = D_UP_TO_DATE;
907 pdsk_min = D_UP_TO_DATE;
908 pdsk_max = D_UP_TO_DATE;
909 break;
910 case C_CONNECTED:
911 disk_min = D_DISKLESS;
912 disk_max = D_UP_TO_DATE;
913 pdsk_min = D_DISKLESS;
914 pdsk_max = D_UP_TO_DATE;
915 break;
916 case C_WF_BITMAP_S:
917 case C_PAUSED_SYNC_S:
918 case C_STARTING_SYNC_S:
919 case C_AHEAD:
920 disk_min = D_UP_TO_DATE;
921 disk_max = D_UP_TO_DATE;
922 pdsk_min = D_INCONSISTENT;
923 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
924 break;
925 case C_SYNC_TARGET:
926 disk_min = D_INCONSISTENT;
927 disk_max = D_INCONSISTENT;
928 pdsk_min = D_UP_TO_DATE;
929 pdsk_max = D_UP_TO_DATE;
930 break;
931 case C_SYNC_SOURCE:
932 disk_min = D_UP_TO_DATE;
933 disk_max = D_UP_TO_DATE;
934 pdsk_min = D_INCONSISTENT;
935 pdsk_max = D_INCONSISTENT;
936 break;
937 case C_STANDALONE:
938 case C_DISCONNECTING:
939 case C_UNCONNECTED:
940 case C_TIMEOUT:
941 case C_BROKEN_PIPE:
942 case C_NETWORK_FAILURE:
943 case C_PROTOCOL_ERROR:
944 case C_TEAR_DOWN:
945 case C_WF_CONNECTION:
946 case C_WF_REPORT_PARAMS:
947 case C_MASK:
948 break;
949 }
950 if (ns.disk > disk_max)
951 ns.disk = disk_max;
952
953 if (ns.disk < disk_min) {
954 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
955 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
956 ns.disk = disk_min;
957 }
958 if (ns.pdsk > pdsk_max)
959 ns.pdsk = pdsk_max;
960
961 if (ns.pdsk < pdsk_min) {
962 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
963 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
964 ns.pdsk = pdsk_min;
965 }
966
967 if (fp == FP_STONITH &&
968 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
969 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
970 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
971
972 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
973 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
974 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
975 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
976
977 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
978 if (ns.conn == C_SYNC_SOURCE)
979 ns.conn = C_PAUSED_SYNC_S;
980 if (ns.conn == C_SYNC_TARGET)
981 ns.conn = C_PAUSED_SYNC_T;
982 } else {
983 if (ns.conn == C_PAUSED_SYNC_S)
984 ns.conn = C_SYNC_SOURCE;
985 if (ns.conn == C_PAUSED_SYNC_T)
986 ns.conn = C_SYNC_TARGET;
987 }
988
989 return ns;
990}
991
992/* helper for __drbd_set_state */
993static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
994{
995 if (mdev->agreed_pro_version < 90)
996 mdev->ov_start_sector = 0;
997 mdev->rs_total = drbd_bm_bits(mdev);
998 mdev->ov_position = 0;
999 if (cs == C_VERIFY_T) {
1000 /* starting online verify from an arbitrary position
1001 * does not fit well into the existing protocol.
1002 * on C_VERIFY_T, we initialize ov_left and friends
1003 * implicitly in receive_DataRequest once the
1004 * first P_OV_REQUEST is received */
1005 mdev->ov_start_sector = ~(sector_t)0;
1006 } else {
1007 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
1008 if (bit >= mdev->rs_total) {
1009 mdev->ov_start_sector =
1010 BM_BIT_TO_SECT(mdev->rs_total - 1);
1011 mdev->rs_total = 1;
1012 } else
1013 mdev->rs_total -= bit;
1014 mdev->ov_position = mdev->ov_start_sector;
1015 }
1016 mdev->ov_left = mdev->rs_total;
1017}
1018
1019static void drbd_resume_al(struct drbd_conf *mdev)
1020{
1021 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1022 dev_info(DEV, "Resumed AL updates\n");
1023}
1024
1025/**
1026 * __drbd_set_state() - Set a new DRBD state
1027 * @mdev: DRBD device.
1028 * @ns: new state.
1029 * @flags: Flags
1030 * @done: Optional completion, that will get completed after the after_state_ch() finished
1031 *
1032 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1033 */
1034enum drbd_state_rv
1035__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1036 enum chg_state_flags flags, struct completion *done)
1037{
1038 union drbd_state os;
1039 enum drbd_state_rv rv = SS_SUCCESS;
1040 const char *warn_sync_abort = NULL;
1041 struct after_state_chg_work *ascw;
1042
1043 os = mdev->state;
1044
1045 ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1046
1047 if (ns.i == os.i)
1048 return SS_NOTHING_TO_DO;
1049
1050 if (!(flags & CS_HARD)) {
1051 /* pre-state-change checks ; only look at ns */
1052 /* See drbd_state_sw_errors in drbd_strings.c */
1053
1054 rv = is_valid_state(mdev, ns);
1055 if (rv < SS_SUCCESS) {
1056 /* If the old state was illegal as well, then let
1057 this happen...*/
1058
1059 if (is_valid_state(mdev, os) == rv)
1060 rv = is_valid_state_transition(mdev, ns, os);
1061 } else
1062 rv = is_valid_state_transition(mdev, ns, os);
1063 }
1064
1065 if (rv < SS_SUCCESS) {
1066 if (flags & CS_VERBOSE)
1067 print_st_err(mdev, os, ns, rv);
1068 return rv;
1069 }
1070
1071 if (warn_sync_abort)
1072 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
1073
1074 {
1075 char *pbp, pb[300];
1076 pbp = pb;
1077 *pbp = 0;
1078 if (ns.role != os.role)
1079 pbp += sprintf(pbp, "role( %s -> %s ) ",
1080 drbd_role_str(os.role),
1081 drbd_role_str(ns.role));
1082 if (ns.peer != os.peer)
1083 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1084 drbd_role_str(os.peer),
1085 drbd_role_str(ns.peer));
1086 if (ns.conn != os.conn)
1087 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1088 drbd_conn_str(os.conn),
1089 drbd_conn_str(ns.conn));
1090 if (ns.disk != os.disk)
1091 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1092 drbd_disk_str(os.disk),
1093 drbd_disk_str(ns.disk));
1094 if (ns.pdsk != os.pdsk)
1095 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1096 drbd_disk_str(os.pdsk),
1097 drbd_disk_str(ns.pdsk));
1098 if (is_susp(ns) != is_susp(os))
1099 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1100 is_susp(os),
1101 is_susp(ns));
1102 if (ns.aftr_isp != os.aftr_isp)
1103 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1104 os.aftr_isp,
1105 ns.aftr_isp);
1106 if (ns.peer_isp != os.peer_isp)
1107 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1108 os.peer_isp,
1109 ns.peer_isp);
1110 if (ns.user_isp != os.user_isp)
1111 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1112 os.user_isp,
1113 ns.user_isp);
1114 dev_info(DEV, "%s\n", pb);
1115 }
1116
1117 /* solve the race between becoming unconfigured,
1118 * worker doing the cleanup, and
1119 * admin reconfiguring us:
1120 * on (re)configure, first set CONFIG_PENDING,
1121 * then wait for a potentially exiting worker,
1122 * start the worker, and schedule one no_op.
1123 * then proceed with configuration.
1124 */
1125 if (ns.disk == D_DISKLESS &&
1126 ns.conn == C_STANDALONE &&
1127 ns.role == R_SECONDARY &&
1128 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1129 set_bit(DEVICE_DYING, &mdev->flags);
1130
1131 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1132 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1133 * drbd_ldev_destroy() won't happen before our corresponding
1134 * after_state_ch works run, where we put_ldev again. */
1135 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1136 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1137 atomic_inc(&mdev->local_cnt);
1138
1139 mdev->state = ns;
1140
1141 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1142 drbd_print_uuids(mdev, "attached to UUIDs");
1143
1144 wake_up(&mdev->misc_wait);
1145 wake_up(&mdev->state_wait);
1146
1147 /* aborted verify run. log the last position */
1148 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1149 ns.conn < C_CONNECTED) {
1150 mdev->ov_start_sector =
1151 BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1152 dev_info(DEV, "Online Verify reached sector %llu\n",
1153 (unsigned long long)mdev->ov_start_sector);
1154 }
1155
1156 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1157 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1158 dev_info(DEV, "Syncer continues.\n");
1159 mdev->rs_paused += (long)jiffies
1160 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1161 if (ns.conn == C_SYNC_TARGET)
1162 mod_timer(&mdev->resync_timer, jiffies);
1163 }
1164
1165 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1166 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1167 dev_info(DEV, "Resync suspended\n");
1168 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1169 }
1170
1171 if (os.conn == C_CONNECTED &&
1172 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1173 unsigned long now = jiffies;
1174 int i;
1175
1176 set_ov_position(mdev, ns.conn);
1177 mdev->rs_start = now;
1178 mdev->rs_last_events = 0;
1179 mdev->rs_last_sect_ev = 0;
1180 mdev->ov_last_oos_size = 0;
1181 mdev->ov_last_oos_start = 0;
1182
1183 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1184 mdev->rs_mark_left[i] = mdev->ov_left;
1185 mdev->rs_mark_time[i] = now;
1186 }
1187
1188 drbd_rs_controller_reset(mdev);
1189
1190 if (ns.conn == C_VERIFY_S) {
1191 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1192 (unsigned long long)mdev->ov_position);
1193 mod_timer(&mdev->resync_timer, jiffies);
1194 }
1195 }
1196
1197 if (get_ldev(mdev)) {
1198 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1199 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1200 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1201
1202 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1203 mdf |= MDF_CRASHED_PRIMARY;
1204 if (mdev->state.role == R_PRIMARY ||
1205 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1206 mdf |= MDF_PRIMARY_IND;
1207 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1208 mdf |= MDF_CONNECTED_IND;
1209 if (mdev->state.disk > D_INCONSISTENT)
1210 mdf |= MDF_CONSISTENT;
1211 if (mdev->state.disk > D_OUTDATED)
1212 mdf |= MDF_WAS_UP_TO_DATE;
1213 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1214 mdf |= MDF_PEER_OUT_DATED;
1215 if (mdf != mdev->ldev->md.flags) {
1216 mdev->ldev->md.flags = mdf;
1217 drbd_md_mark_dirty(mdev);
1218 }
1219 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1220 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1221 put_ldev(mdev);
1222 }
1223
1224 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1225 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1226 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1227 set_bit(CONSIDER_RESYNC, &mdev->flags);
1228
1229 /* Receiver should clean up itself */
1230 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1231 drbd_thread_stop_nowait(&mdev->receiver);
1232
1233 /* Now the receiver finished cleaning up itself, it should die */
1234 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1235 drbd_thread_stop_nowait(&mdev->receiver);
1236
1237 /* Upon network failure, we need to restart the receiver. */
1238 if (os.conn > C_TEAR_DOWN &&
1239 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1240 drbd_thread_restart_nowait(&mdev->receiver);
1241
1242 /* Resume AL writing if we get a connection */
1243 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1244 drbd_resume_al(mdev);
1245
1246 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1247 if (ascw) {
1248 ascw->os = os;
1249 ascw->ns = ns;
1250 ascw->flags = flags;
1251 ascw->w.cb = w_after_state_ch;
1252 ascw->done = done;
1253 drbd_queue_work(&mdev->data.work, &ascw->w);
1254 } else {
1255 dev_warn(DEV, "Could not kmalloc an ascw\n");
1256 }
1257
1258 return rv;
1259}
1260
1261static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1262{
1263 struct after_state_chg_work *ascw =
1264 container_of(w, struct after_state_chg_work, w);
1265 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1266 if (ascw->flags & CS_WAIT_COMPLETE) {
1267 D_ASSERT(ascw->done != NULL);
1268 complete(ascw->done);
1269 }
1270 kfree(ascw);
1271
1272 return 1;
1273}
1274
1275static void abw_start_sync(struct drbd_conf *mdev, int rv)
1276{
1277 if (rv) {
1278 dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
1279 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1280 return;
1281 }
1282
1283 switch (mdev->state.conn) {
1284 case C_STARTING_SYNC_T:
1285 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1286 break;
1287 case C_STARTING_SYNC_S:
1288 drbd_start_resync(mdev, C_SYNC_SOURCE);
1289 break;
1290 }
1291}
1292
1293int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
1294 int (*io_fn)(struct drbd_conf *),
1295 char *why, enum bm_flag flags)
1296{
1297 int rv;
1298
1299 D_ASSERT(current == mdev->worker.task);
1300
1301 /* open coded non-blocking drbd_suspend_io(mdev); */
1302 set_bit(SUSPEND_IO, &mdev->flags);
1303
1304 drbd_bm_lock(mdev, why, flags);
1305 rv = io_fn(mdev);
1306 drbd_bm_unlock(mdev);
1307
1308 drbd_resume_io(mdev);
1309
1310 return rv;
1311}
1312
1313/**
1314 * after_state_ch() - Perform after state change actions that may sleep
1315 * @mdev: DRBD device.
1316 * @os: old state.
1317 * @ns: new state.
1318 * @flags: Flags
1319 */
1320static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1321 union drbd_state ns, enum chg_state_flags flags)
1322{
1323 enum drbd_fencing_p fp;
1324 enum drbd_req_event what = NOTHING;
1325 union drbd_state nsm = (union drbd_state){ .i = -1 };
1326
1327 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1328 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1329 if (mdev->p_uuid)
1330 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1331 }
1332
1333 fp = FP_DONT_CARE;
1334 if (get_ldev(mdev)) {
1335 fp = mdev->ldev->dc.fencing;
1336 put_ldev(mdev);
1337 }
1338
1339 /* Inform userspace about the change... */
1340 drbd_bcast_state(mdev, ns);
1341
1342 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1343 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1344 drbd_khelper(mdev, "pri-on-incon-degr");
1345
1346 /* Here we have the actions that are performed after a
1347 state change. This function might sleep */
1348
1349 nsm.i = -1;
1350 if (ns.susp_nod) {
1351 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1352 what = RESEND;
1353
1354 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
1355 what = RESTART_FROZEN_DISK_IO;
1356
1357 if (what != NOTHING)
1358 nsm.susp_nod = 0;
1359 }
1360
1361 if (ns.susp_fen) {
1362 /* case1: The outdate peer handler is successful: */
1363 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
1364 tl_clear(mdev);
1365 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1366 drbd_uuid_new_current(mdev);
1367 clear_bit(NEW_CUR_UUID, &mdev->flags);
1368 }
1369 spin_lock_irq(&mdev->req_lock);
1370 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1371 spin_unlock_irq(&mdev->req_lock);
1372 }
1373 /* case2: The connection was established again: */
1374 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1375 clear_bit(NEW_CUR_UUID, &mdev->flags);
1376 what = RESEND;
1377 nsm.susp_fen = 0;
1378 }
1379 }
1380
1381 if (what != NOTHING) {
1382 spin_lock_irq(&mdev->req_lock);
1383 _tl_restart(mdev, what);
1384 nsm.i &= mdev->state.i;
1385 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
1386 spin_unlock_irq(&mdev->req_lock);
1387 }
1388
1389 /* Became sync source. With protocol >= 96, we still need to send out
1390 * the sync uuid now. Need to do that before any drbd_send_state, or
1391 * the other side may go "paused sync" before receiving the sync uuids,
1392 * which is unexpected. */
1393 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1394 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1395 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1396 drbd_gen_and_send_sync_uuid(mdev);
1397 put_ldev(mdev);
1398 }
1399
1400 /* Do not change the order of the if above and the two below... */
1401 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1402 drbd_send_uuids(mdev);
1403 drbd_send_state(mdev);
1404 }
1405 /* No point in queuing send_bitmap if we don't have a connection
1406 * anymore, so check also the _current_ state, not only the new state
1407 * at the time this work was queued. */
1408 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1409 mdev->state.conn == C_WF_BITMAP_S)
1410 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1411 "send_bitmap (WFBitMapS)",
1412 BM_LOCKED_TEST_ALLOWED);
1413
1414 /* Lost contact to peer's copy of the data */
1415 if ((os.pdsk >= D_INCONSISTENT &&
1416 os.pdsk != D_UNKNOWN &&
1417 os.pdsk != D_OUTDATED)
1418 && (ns.pdsk < D_INCONSISTENT ||
1419 ns.pdsk == D_UNKNOWN ||
1420 ns.pdsk == D_OUTDATED)) {
1421 if (get_ldev(mdev)) {
1422 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
1423 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
1424 if (is_susp(mdev->state)) {
1425 set_bit(NEW_CUR_UUID, &mdev->flags);
1426 } else {
1427 drbd_uuid_new_current(mdev);
1428 drbd_send_uuids(mdev);
1429 }
1430 }
1431 put_ldev(mdev);
1432 }
1433 }
1434
1435 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
1436 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
1437 drbd_uuid_new_current(mdev);
1438 drbd_send_uuids(mdev);
1439 }
1440
1441 /* D_DISKLESS Peer becomes secondary */
1442 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1443 /* We may still be Primary ourselves.
1444 * No harm done if the bitmap still changes,
1445 * redirtied pages will follow later. */
1446 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1447 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
1448 put_ldev(mdev);
1449 }
1450
1451 /* Write out all changed bits on demote.
1452 * Though, no need to do that just yet
1453 * if there is a resync going on still */
1454 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1455 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1456 /* No changes to the bitmap expected this time, so assert that,
1457 * even though no harm was done if it did change. */
1458 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1459 "demote", BM_LOCKED_TEST_ALLOWED);
1460 put_ldev(mdev);
1461 }
1462
1463 /* Last part of the attaching process ... */
1464 if (ns.conn >= C_CONNECTED &&
1465 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
1466 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
1467 drbd_send_uuids(mdev);
1468 drbd_send_state(mdev);
1469 }
1470
1471 /* We want to pause/continue resync, tell peer. */
1472 if (ns.conn >= C_CONNECTED &&
1473 ((os.aftr_isp != ns.aftr_isp) ||
1474 (os.user_isp != ns.user_isp)))
1475 drbd_send_state(mdev);
1476
1477 /* In case one of the isp bits got set, suspend other devices. */
1478 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1479 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1480 suspend_other_sg(mdev);
1481
1482 /* Make sure the peer gets informed about eventual state
1483 changes (ISP bits) while we were in WFReportParams. */
1484 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1485 drbd_send_state(mdev);
1486
1487 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1488 drbd_send_state(mdev);
1489
1490 /* We are in the progress to start a full sync... */
1491 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1492 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1493 /* no other bitmap changes expected during this phase */
1494 drbd_queue_bitmap_io(mdev,
1495 &drbd_bmio_set_n_write, &abw_start_sync,
1496 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
1497
1498 /* We are invalidating our self... */
1499 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1500 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1501 /* other bitmap operation expected during this phase */
1502 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1503 "set_n_write from invalidate", BM_LOCKED_MASK);
1504
1505 /* first half of local IO error, failure to attach,
1506 * or administrative detach */
1507 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1508 enum drbd_io_error_p eh;
1509 int was_io_error;
1510 /* corresponding get_ldev was in __drbd_set_state, to serialize
1511 * our cleanup here with the transition to D_DISKLESS,
1512 * so it is safe to dereference ldev here. */
1513 eh = mdev->ldev->dc.on_io_error;
1514 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1515
1516 /* current state still has to be D_FAILED,
1517 * there is only one way out: to D_DISKLESS,
1518 * and that may only happen after our put_ldev below. */
1519 if (mdev->state.disk != D_FAILED)
1520 dev_err(DEV,
1521 "ASSERT FAILED: disk is %s during detach\n",
1522 drbd_disk_str(mdev->state.disk));
1523
1524 if (drbd_send_state(mdev))
1525 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
1526 else
1527 dev_err(DEV, "Sending state for detaching disk failed\n");
1528
1529 drbd_rs_cancel_all(mdev);
1530
1531 /* In case we want to get something to stable storage still,
1532 * this may be the last chance.
1533 * Following put_ldev may transition to D_DISKLESS. */
1534 drbd_md_sync(mdev);
1535 put_ldev(mdev);
1536
1537 if (was_io_error && eh == EP_CALL_HELPER)
1538 drbd_khelper(mdev, "local-io-error");
1539 }
1540
1541 /* second half of local IO error, failure to attach,
1542 * or administrative detach,
1543 * after local_cnt references have reached zero again */
1544 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1545 /* We must still be diskless,
1546 * re-attach has to be serialized with this! */
1547 if (mdev->state.disk != D_DISKLESS)
1548 dev_err(DEV,
1549 "ASSERT FAILED: disk is %s while going diskless\n",
1550 drbd_disk_str(mdev->state.disk));
1551
1552 mdev->rs_total = 0;
1553 mdev->rs_failed = 0;
1554 atomic_set(&mdev->rs_pending_cnt, 0);
1555
1556 if (drbd_send_state(mdev))
1557 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
1558 /* corresponding get_ldev in __drbd_set_state
1559 * this may finally trigger drbd_ldev_destroy. */
1560 put_ldev(mdev);
1561 }
1562
1563 /* Notify peer that I had a local IO error, and did not detach. */
1564 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1565 drbd_send_state(mdev);
1566
1567 /* Disks got bigger while they were detached */
1568 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1569 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1570 if (ns.conn == C_CONNECTED)
1571 resync_after_online_grow(mdev);
1572 }
1573
1574 /* A resync finished or aborted, wake paused devices... */
1575 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1576 (os.peer_isp && !ns.peer_isp) ||
1577 (os.user_isp && !ns.user_isp))
1578 resume_next_sg(mdev);
1579
1580 /* sync target done with resync. Explicitly notify peer, even though
1581 * it should (at least for non-empty resyncs) already know itself. */
1582 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1583 drbd_send_state(mdev);
1584
1585 /* This triggers bitmap writeout of potentially still unwritten pages
1586 * if the resync finished cleanly, or aborted because of peer disk
1587 * failure, or because of connection loss.
1588 * For resync aborted because of local disk failure, we cannot do
1589 * any bitmap writeout anymore.
1590 * No harm done if some bits change during this phase.
1591 */
1592 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1593 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1594 "write from resync_finished", BM_LOCKED_SET_ALLOWED);
1595 put_ldev(mdev);
1596 }
1597
1598 /* Upon network connection, we need to start the receiver */
1599 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1600 drbd_thread_start(&mdev->receiver);
1601
1602 /* Terminate worker thread if we are unconfigured - it will be
1603 restarted as needed... */
1604 if (ns.disk == D_DISKLESS &&
1605 ns.conn == C_STANDALONE &&
1606 ns.role == R_SECONDARY) {
1607 if (os.aftr_isp != ns.aftr_isp)
1608 resume_next_sg(mdev);
1609 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1610 if (test_bit(DEVICE_DYING, &mdev->flags))
1611 drbd_thread_stop_nowait(&mdev->worker);
1612 }
1613
1614 drbd_md_sync(mdev);
1615}
1616
1617
1618static int drbd_thread_setup(void *arg)
1619{
1620 struct drbd_thread *thi = (struct drbd_thread *) arg;
1621 struct drbd_conf *mdev = thi->mdev;
1622 unsigned long flags;
1623 int retval;
1624
1625restart:
1626 retval = thi->function(thi);
1627
1628 spin_lock_irqsave(&thi->t_lock, flags);
1629
1630 /* if the receiver has been "EXITING", the last thing it did
1631 * was set the conn state to "StandAlone",
1632 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1633 * and receiver thread will be "started".
1634 * drbd_thread_start needs to set "RESTARTING" in that case.
1635 * t_state check and assignment needs to be within the same spinlock,
1636 * so either thread_start sees EXITING, and can remap to RESTARTING,
1637 * or thread_start see NONE, and can proceed as normal.
1638 */
1639
1640 if (thi->t_state == RESTARTING) {
1641 dev_info(DEV, "Restarting %s\n", current->comm);
1642 thi->t_state = RUNNING;
1643 spin_unlock_irqrestore(&thi->t_lock, flags);
1644 goto restart;
1645 }
1646
1647 thi->task = NULL;
e77a0a5c 1648 thi->t_state = NONE;
b411b363
PR
1649 smp_mb();
1650 complete(&thi->stop);
1651 spin_unlock_irqrestore(&thi->t_lock, flags);
1652
1653 dev_info(DEV, "Terminating %s\n", current->comm);
1654
1655 /* Release mod reference taken when thread was started */
1656 module_put(THIS_MODULE);
1657 return retval;
1658}
1659
1660static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1661 int (*func) (struct drbd_thread *))
1662{
1663 spin_lock_init(&thi->t_lock);
1664 thi->task = NULL;
e77a0a5c 1665 thi->t_state = NONE;
b411b363
PR
1666 thi->function = func;
1667 thi->mdev = mdev;
1668}
1669
1670int drbd_thread_start(struct drbd_thread *thi)
1671{
1672 struct drbd_conf *mdev = thi->mdev;
1673 struct task_struct *nt;
1674 unsigned long flags;
1675
1676 const char *me =
1677 thi == &mdev->receiver ? "receiver" :
1678 thi == &mdev->asender ? "asender" :
1679 thi == &mdev->worker ? "worker" : "NONSENSE";
1680
1681 /* is used from state engine doing drbd_thread_stop_nowait,
1682 * while holding the req lock irqsave */
1683 spin_lock_irqsave(&thi->t_lock, flags);
1684
1685 switch (thi->t_state) {
e77a0a5c 1686 case NONE:
b411b363
PR
1687 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1688 me, current->comm, current->pid);
1689
1690 /* Get ref on module for thread - this is released when thread exits */
1691 if (!try_module_get(THIS_MODULE)) {
1692 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1693 spin_unlock_irqrestore(&thi->t_lock, flags);
81e84650 1694 return false;
b411b363
PR
1695 }
1696
1697 init_completion(&thi->stop);
1698 D_ASSERT(thi->task == NULL);
1699 thi->reset_cpu_mask = 1;
e77a0a5c 1700 thi->t_state = RUNNING;
b411b363
PR
1701 spin_unlock_irqrestore(&thi->t_lock, flags);
1702 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1703
1704 nt = kthread_create(drbd_thread_setup, (void *) thi,
1705 "drbd%d_%s", mdev_to_minor(mdev), me);
1706
1707 if (IS_ERR(nt)) {
1708 dev_err(DEV, "Couldn't start thread\n");
1709
1710 module_put(THIS_MODULE);
81e84650 1711 return false;
b411b363
PR
1712 }
1713 spin_lock_irqsave(&thi->t_lock, flags);
1714 thi->task = nt;
e77a0a5c 1715 thi->t_state = RUNNING;
b411b363
PR
1716 spin_unlock_irqrestore(&thi->t_lock, flags);
1717 wake_up_process(nt);
1718 break;
e77a0a5c
AG
1719 case EXITING:
1720 thi->t_state = RESTARTING;
b411b363
PR
1721 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1722 me, current->comm, current->pid);
1723 /* fall through */
e77a0a5c
AG
1724 case RUNNING:
1725 case RESTARTING:
b411b363
PR
1726 default:
1727 spin_unlock_irqrestore(&thi->t_lock, flags);
1728 break;
1729 }
1730
81e84650 1731 return true;
b411b363
PR
1732}
1733
1734
1735void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1736{
1737 unsigned long flags;
1738
e77a0a5c 1739 enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
b411b363
PR
1740
1741 /* may be called from state engine, holding the req lock irqsave */
1742 spin_lock_irqsave(&thi->t_lock, flags);
1743
e77a0a5c 1744 if (thi->t_state == NONE) {
b411b363
PR
1745 spin_unlock_irqrestore(&thi->t_lock, flags);
1746 if (restart)
1747 drbd_thread_start(thi);
1748 return;
1749 }
1750
1751 if (thi->t_state != ns) {
1752 if (thi->task == NULL) {
1753 spin_unlock_irqrestore(&thi->t_lock, flags);
1754 return;
1755 }
1756
1757 thi->t_state = ns;
1758 smp_mb();
1759 init_completion(&thi->stop);
1760 if (thi->task != current)
1761 force_sig(DRBD_SIGKILL, thi->task);
1762
1763 }
1764
1765 spin_unlock_irqrestore(&thi->t_lock, flags);
1766
1767 if (wait)
1768 wait_for_completion(&thi->stop);
1769}
1770
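/*
 * Illustrative sketch, not part of the original source: how the thread
 * helpers above are meant to be combined.  "example_thread_fn" and
 * "example_thread_lifecycle" are hypothetical; the real driver wires up
 * drbdd_init, drbd_worker and drbd_asender in drbd_init_set_defaults().
 */
static int example_thread_fn(struct drbd_thread *thi)
{
	/* run until drbd_thread_stop()/_drbd_thread_stop() changes t_state;
	 * EXITING vs. RESTARTING is then resolved by drbd_thread_setup() */
	while (get_t_state(thi) == RUNNING)
		schedule_timeout_interruptible(HZ);
	return 0;
}

static void example_thread_lifecycle(struct drbd_conf *mdev, struct drbd_thread *thi)
{
	drbd_thread_init(mdev, thi, example_thread_fn);	/* t_state = NONE */
	drbd_thread_start(thi);				/* NONE -> RUNNING */
	_drbd_thread_stop(thi, false, true);		/* RUNNING -> EXITING, wait for completion */
}
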
1771#ifdef CONFIG_SMP
1772/**
1773 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1774 * @mdev: DRBD device.
1775 *
1776 * Forces all threads of a device onto the same CPU. This is beneficial for
 1777 * DRBD's performance. May be overridden by the user's configuration.
1778 */
1779void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1780{
1781 int ord, cpu;
1782
1783 /* user override. */
1784 if (cpumask_weight(mdev->cpu_mask))
1785 return;
1786
1787 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1788 for_each_online_cpu(cpu) {
1789 if (ord-- == 0) {
1790 cpumask_set_cpu(cpu, mdev->cpu_mask);
1791 return;
1792 }
1793 }
1794 /* should not be reached */
1795 cpumask_setall(mdev->cpu_mask);
1796}
1797
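/*
 * Worked example (illustrative, not from the original source): with four
 * online CPUs, the "ord = minor % cpumask_weight(cpu_online_mask)" step
 * above maps minors 0 and 4 to the first online CPU, minors 1 and 5 to
 * the second, and so on -- devices are spread round-robin while all
 * threads of one device share a single CPU.
 */
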
1798/**
1799 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1800 * @mdev: DRBD device.
1801 *
 1802 * Call this in the "main loop" of _all_ threads; no need for any mutex, current won't die
1803 * prematurely.
1804 */
1805void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1806{
1807 struct task_struct *p = current;
1808 struct drbd_thread *thi =
1809 p == mdev->asender.task ? &mdev->asender :
1810 p == mdev->receiver.task ? &mdev->receiver :
1811 p == mdev->worker.task ? &mdev->worker :
1812 NULL;
841ce241 1813 if (!expect(thi != NULL))
b411b363
PR
1814 return;
1815 if (!thi->reset_cpu_mask)
1816 return;
1817 thi->reset_cpu_mask = 0;
1818 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1819}
1820#endif
1821
1822/* the appropriate socket mutex must be held already */
1823int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
0b70a13d 1824 enum drbd_packets cmd, struct p_header80 *h,
b411b363
PR
1825 size_t size, unsigned msg_flags)
1826{
1827 int sent, ok;
1828
841ce241
AG
1829 if (!expect(h))
1830 return false;
1831 if (!expect(size))
1832 return false;
b411b363 1833
ca9bc12b 1834 h->magic = cpu_to_be32(DRBD_MAGIC);
b411b363 1835 h->command = cpu_to_be16(cmd);
0b70a13d 1836 h->length = cpu_to_be16(size-sizeof(struct p_header80));
b411b363 1837
b411b363
PR
1838 sent = drbd_send(mdev, sock, h, size, msg_flags);
1839
1840 ok = (sent == size);
0ddc5549
LE
1841 if (!ok && !signal_pending(current))
1842 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
b411b363
PR
1843 cmdname(cmd), (int)size, sent);
1844 return ok;
1845}
1846
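/*
 * Worked example (illustrative, not from the original source): struct
 * p_header80 occupies 8 bytes on the wire (be32 magic, be16 command,
 * be16 length), and the p_* packet structs embed it as their first
 * member.  Passing e.g. a struct p_barrier_ack with its full sizeof()
 * therefore yields h->length == sizeof(struct p_barrier_ack) - 8, i.e.
 * only the payload following the header is counted.
 */
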
1847/* don't pass the socket. we may only look at it
1848 * when we hold the appropriate socket mutex.
1849 */
1850int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
0b70a13d 1851 enum drbd_packets cmd, struct p_header80 *h, size_t size)
b411b363
PR
1852{
1853 int ok = 0;
1854 struct socket *sock;
1855
1856 if (use_data_socket) {
1857 mutex_lock(&mdev->data.mutex);
1858 sock = mdev->data.socket;
1859 } else {
1860 mutex_lock(&mdev->meta.mutex);
1861 sock = mdev->meta.socket;
1862 }
1863
1864 /* drbd_disconnect() could have called drbd_free_sock()
1865 * while we were waiting in down()... */
1866 if (likely(sock != NULL))
1867 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1868
1869 if (use_data_socket)
1870 mutex_unlock(&mdev->data.mutex);
1871 else
1872 mutex_unlock(&mdev->meta.mutex);
1873 return ok;
1874}
1875
1876int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1877 size_t size)
1878{
0b70a13d 1879 struct p_header80 h;
b411b363
PR
1880 int ok;
1881
ca9bc12b 1882 h.magic = cpu_to_be32(DRBD_MAGIC);
b411b363
PR
1883 h.command = cpu_to_be16(cmd);
1884 h.length = cpu_to_be16(size);
1885
1886 if (!drbd_get_data_sock(mdev))
1887 return 0;
1888
b411b363
PR
1889 ok = (sizeof(h) ==
1890 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1891 ok = ok && (size ==
1892 drbd_send(mdev, mdev->data.socket, data, size, 0));
1893
1894 drbd_put_data_sock(mdev);
1895
1896 return ok;
1897}
1898
1899int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1900{
8e26f9cc 1901 struct p_rs_param_95 *p;
b411b363
PR
1902 struct socket *sock;
1903 int size, rv;
1904 const int apv = mdev->agreed_pro_version;
1905
1906 size = apv <= 87 ? sizeof(struct p_rs_param)
1907 : apv == 88 ? sizeof(struct p_rs_param)
1908 + strlen(mdev->sync_conf.verify_alg) + 1
8e26f9cc
PR
1909 : apv <= 94 ? sizeof(struct p_rs_param_89)
1910 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363
PR
1911
1912 /* used from admin command context and receiver/worker context.
1913 * to avoid kmalloc, grab the socket right here,
1914 * then use the pre-allocated sbuf there */
1915 mutex_lock(&mdev->data.mutex);
1916 sock = mdev->data.socket;
1917
1918 if (likely(sock != NULL)) {
1919 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1920
8e26f9cc 1921 p = &mdev->data.sbuf.rs_param_95;
b411b363
PR
1922
1923 /* initialize verify_alg and csums_alg */
1924 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1925
1926 p->rate = cpu_to_be32(sc->rate);
8e26f9cc
PR
1927 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1928 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1929 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1930 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
b411b363
PR
1931
1932 if (apv >= 88)
1933 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1934 if (apv >= 89)
1935 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1936
1937 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1938 } else
1939 rv = 0; /* not ok */
1940
1941 mutex_unlock(&mdev->data.mutex);
1942
1943 return rv;
1944}
1945
1946int drbd_send_protocol(struct drbd_conf *mdev)
1947{
1948 struct p_protocol *p;
cf14c2e9 1949 int size, cf, rv;
b411b363
PR
1950
1951 size = sizeof(struct p_protocol);
1952
1953 if (mdev->agreed_pro_version >= 87)
1954 size += strlen(mdev->net_conf->integrity_alg) + 1;
1955
1956 /* we must not recurse into our own queue,
1957 * as that is blocked during handshake */
1958 p = kmalloc(size, GFP_NOIO);
1959 if (p == NULL)
1960 return 0;
1961
1962 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
1963 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
1964 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
1965 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
b411b363
PR
1966 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1967
cf14c2e9
PR
1968 cf = 0;
1969 if (mdev->net_conf->want_lose)
1970 cf |= CF_WANT_LOSE;
1971 if (mdev->net_conf->dry_run) {
1972 if (mdev->agreed_pro_version >= 92)
1973 cf |= CF_DRY_RUN;
1974 else {
 1975			dev_err(DEV, "--dry-run is not supported by peer\n");
7ac314c8 1976 kfree(p);
148efa16 1977 return -1;
cf14c2e9
PR
1978 }
1979 }
1980 p->conn_flags = cpu_to_be32(cf);
1981
b411b363
PR
1982 if (mdev->agreed_pro_version >= 87)
1983 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1984
1985 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
0b70a13d 1986 (struct p_header80 *)p, size);
b411b363
PR
1987 kfree(p);
1988 return rv;
1989}
1990
1991int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1992{
1993 struct p_uuids p;
1994 int i;
1995
1996 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1997 return 1;
1998
1999 for (i = UI_CURRENT; i < UI_SIZE; i++)
2000 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2001
2002 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2003 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2004 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2005 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2006 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2007 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2008
2009 put_ldev(mdev);
2010
2011 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
0b70a13d 2012 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2013}
2014
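/*
 * Illustrative summary, not part of the original source: the uuid_flags
 * word placed in p.uuid[UI_FLAGS] above is a small bit field -- bit 0
 * mirrors want_lose ("discard my data"), bit 1 marks a crashed primary,
 * bit 2 a disk that is still D_INCONSISTENT during negotiation, and
 * value 8 (see drbd_send_uuids_skip_initial_sync() below) signals that
 * the initial sync may be skipped.
 */
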
2015int drbd_send_uuids(struct drbd_conf *mdev)
2016{
2017 return _drbd_send_uuids(mdev, 0);
2018}
2019
2020int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2021{
2022 return _drbd_send_uuids(mdev, 8);
2023}
2024
62b0da3a
LE
2025void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2026{
2027 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2028 u64 *uuid = mdev->ldev->md.uuid;
2029 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2030 text,
2031 (unsigned long long)uuid[UI_CURRENT],
2032 (unsigned long long)uuid[UI_BITMAP],
2033 (unsigned long long)uuid[UI_HISTORY_START],
2034 (unsigned long long)uuid[UI_HISTORY_END]);
2035 put_ldev(mdev);
2036 } else {
2037 dev_info(DEV, "%s effective data uuid: %016llX\n",
2038 text,
2039 (unsigned long long)mdev->ed_uuid);
2040 }
2041}
2042
5a22db89 2043int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
b411b363
PR
2044{
2045 struct p_rs_uuid p;
5a22db89
LE
2046 u64 uuid;
2047
2048 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
b411b363 2049
4a23f264 2050 uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
5a22db89 2051 drbd_uuid_set(mdev, UI_BITMAP, uuid);
62b0da3a 2052 drbd_print_uuids(mdev, "updated sync UUID");
5a22db89
LE
2053 drbd_md_sync(mdev);
2054 p.uuid = cpu_to_be64(uuid);
b411b363
PR
2055
2056 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
0b70a13d 2057 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2058}
2059
e89b591c 2060int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
b411b363
PR
2061{
2062 struct p_sizes p;
2063 sector_t d_size, u_size;
99432fcc 2064 int q_order_type, max_bio_size;
b411b363
PR
2065 int ok;
2066
2067 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2068 D_ASSERT(mdev->ldev->backing_bdev);
2069 d_size = drbd_get_max_capacity(mdev->ldev);
2070 u_size = mdev->ldev->dc.disk_size;
2071 q_order_type = drbd_queue_order_type(mdev);
99432fcc
PR
2072 max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
2073 max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
b411b363
PR
2074 put_ldev(mdev);
2075 } else {
2076 d_size = 0;
2077 u_size = 0;
2078 q_order_type = QUEUE_ORDERED_NONE;
99432fcc 2079 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
b411b363
PR
2080 }
2081
2082 p.d_size = cpu_to_be64(d_size);
2083 p.u_size = cpu_to_be64(u_size);
2084 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
99432fcc 2085 p.max_bio_size = cpu_to_be32(max_bio_size);
e89b591c
PR
2086 p.queue_order_type = cpu_to_be16(q_order_type);
2087 p.dds_flags = cpu_to_be16(flags);
b411b363
PR
2088
2089 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
0b70a13d 2090 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2091 return ok;
2092}
2093
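/*
 * Illustrative summary, not part of the original source: of the sizes
 * sent above, d_size is the usable capacity of the local backing device,
 * u_size the size requested by the user (0 means automatic), and c_size
 * the capacity currently exposed by the DRBD device itself -- sent as 0
 * when trigger_reply is set and only the peer's answer is wanted.
 */
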
2094/**
2095 * drbd_send_state() - Sends the drbd state to the peer
2096 * @mdev: DRBD device.
2097 */
2098int drbd_send_state(struct drbd_conf *mdev)
2099{
2100 struct socket *sock;
2101 struct p_state p;
2102 int ok = 0;
2103
 2104	/* Grab state lock so we won't send state if we're in the middle
2105 * of a cluster wide state change on another thread */
2106 drbd_state_lock(mdev);
2107
2108 mutex_lock(&mdev->data.mutex);
2109
2110 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2111 sock = mdev->data.socket;
2112
2113 if (likely(sock != NULL)) {
2114 ok = _drbd_send_cmd(mdev, sock, P_STATE,
0b70a13d 2115 (struct p_header80 *)&p, sizeof(p), 0);
b411b363
PR
2116 }
2117
2118 mutex_unlock(&mdev->data.mutex);
2119
2120 drbd_state_unlock(mdev);
2121 return ok;
2122}
2123
2124int drbd_send_state_req(struct drbd_conf *mdev,
2125 union drbd_state mask, union drbd_state val)
2126{
2127 struct p_req_state p;
2128
2129 p.mask = cpu_to_be32(mask.i);
2130 p.val = cpu_to_be32(val.i);
2131
2132 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
0b70a13d 2133 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2134}
2135
bf885f8a 2136int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
b411b363
PR
2137{
2138 struct p_req_state_reply p;
2139
2140 p.retcode = cpu_to_be32(retcode);
2141
2142 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
0b70a13d 2143 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2144}
2145
2146int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2147 struct p_compressed_bm *p,
2148 struct bm_xfer_ctx *c)
2149{
2150 struct bitstream bs;
2151 unsigned long plain_bits;
2152 unsigned long tmp;
2153 unsigned long rl;
2154 unsigned len;
2155 unsigned toggle;
2156 int bits;
2157
2158 /* may we use this feature? */
2159 if ((mdev->sync_conf.use_rle == 0) ||
2160 (mdev->agreed_pro_version < 90))
2161 return 0;
2162
2163 if (c->bit_offset >= c->bm_bits)
2164 return 0; /* nothing to do. */
2165
 2166	/* use at most this many bytes */
2167 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2168 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2169 /* plain bits covered in this code string */
2170 plain_bits = 0;
2171
2172 /* p->encoding & 0x80 stores whether the first run length is set.
2173 * bit offset is implicit.
2174 * start with toggle == 2 to be able to tell the first iteration */
2175 toggle = 2;
2176
 2177	/* see how many plain bits we can stuff into one packet
2178 * using RLE and VLI. */
2179 do {
2180 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2181 : _drbd_bm_find_next(mdev, c->bit_offset);
2182 if (tmp == -1UL)
2183 tmp = c->bm_bits;
2184 rl = tmp - c->bit_offset;
2185
2186 if (toggle == 2) { /* first iteration */
2187 if (rl == 0) {
2188 /* the first checked bit was set,
2189 * store start value, */
2190 DCBP_set_start(p, 1);
2191 /* but skip encoding of zero run length */
2192 toggle = !toggle;
2193 continue;
2194 }
2195 DCBP_set_start(p, 0);
2196 }
2197
2198 /* paranoia: catch zero runlength.
2199 * can only happen if bitmap is modified while we scan it. */
2200 if (rl == 0) {
2201 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2202 "t:%u bo:%lu\n", toggle, c->bit_offset);
2203 return -1;
2204 }
2205
2206 bits = vli_encode_bits(&bs, rl);
2207 if (bits == -ENOBUFS) /* buffer full */
2208 break;
2209 if (bits <= 0) {
2210 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2211 return 0;
2212 }
2213
2214 toggle = !toggle;
2215 plain_bits += rl;
2216 c->bit_offset = tmp;
2217 } while (c->bit_offset < c->bm_bits);
2218
2219 len = bs.cur.b - p->code + !!bs.cur.bit;
2220
2221 if (plain_bits < (len << 3)) {
2222 /* incompressible with this method.
2223 * we need to rewind both word and bit position. */
2224 c->bit_offset -= plain_bits;
2225 bm_xfer_ctx_bit_to_word_offset(c);
2226 c->bit_offset = c->word_offset * BITS_PER_LONG;
2227 return 0;
2228 }
2229
2230 /* RLE + VLI was able to compress it just fine.
2231 * update c->word_offset. */
2232 bm_xfer_ctx_bit_to_word_offset(c);
2233
2234 /* store pad_bits */
2235 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2236
2237 return len;
2238}
2239
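/*
 * Illustrative sketch, not part of the original source: the run-length
 * view of a bitmap that fill_bitmap_rle_bits() feeds into the VLI
 * encoder.  It uses the generic find_next_bit()/find_next_zero_bit()
 * helpers instead of the drbd-specific _drbd_bm_find_next*() accessors,
 * and only prints the runs instead of encoding them.
 */
static void example_dump_runs(const unsigned long *bm, unsigned long nbits)
{
	unsigned long bit = 0, next;
	bool bit_is_set = false;	/* runs alternate, starting with clear bits */

	while (bit < nbits) {
		next = bit_is_set ? find_next_zero_bit(bm, nbits, bit)
				  : find_next_bit(bm, nbits, bit);
		/* a zero-length first run corresponds to DCBP_set_start(p, 1) */
		printk(KERN_DEBUG "run of %lu %s bits\n",
		       next - bit, bit_is_set ? "set" : "clear");
		bit = next;
		bit_is_set = !bit_is_set;
	}
}
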
f70af118
AG
2240/**
2241 * send_bitmap_rle_or_plain
2242 *
2243 * Return 0 when done, 1 when another iteration is needed, and a negative error
2244 * code upon failure.
2245 */
2246static int
b411b363 2247send_bitmap_rle_or_plain(struct drbd_conf *mdev,
f70af118 2248 struct p_header80 *h, struct bm_xfer_ctx *c)
b411b363
PR
2249{
2250 struct p_compressed_bm *p = (void*)h;
2251 unsigned long num_words;
2252 int len;
2253 int ok;
2254
2255 len = fill_bitmap_rle_bits(mdev, p, c);
2256
2257 if (len < 0)
f70af118 2258 return -EIO;
b411b363
PR
2259
2260 if (len) {
2261 DCBP_set_code(p, RLE_VLI_Bits);
2262 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2263 sizeof(*p) + len, 0);
2264
2265 c->packets[0]++;
2266 c->bytes[0] += sizeof(*p) + len;
2267
2268 if (c->bit_offset >= c->bm_bits)
2269 len = 0; /* DONE */
2270 } else {
2271 /* was not compressible.
2272 * send a buffer full of plain text bits instead. */
2273 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2274 len = num_words * sizeof(long);
2275 if (len)
2276 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2277 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
0b70a13d 2278 h, sizeof(struct p_header80) + len, 0);
b411b363
PR
2279 c->word_offset += num_words;
2280 c->bit_offset = c->word_offset * BITS_PER_LONG;
2281
2282 c->packets[1]++;
0b70a13d 2283 c->bytes[1] += sizeof(struct p_header80) + len;
b411b363
PR
2284
2285 if (c->bit_offset > c->bm_bits)
2286 c->bit_offset = c->bm_bits;
2287 }
f70af118
AG
2288 if (ok) {
2289 if (len == 0) {
2290 INFO_bm_xfer_stats(mdev, "send", c);
2291 return 0;
2292 } else
2293 return 1;
2294 }
2295 return -EIO;
b411b363
PR
2296}
2297
2298/* See the comment at receive_bitmap() */
2299int _drbd_send_bitmap(struct drbd_conf *mdev)
2300{
2301 struct bm_xfer_ctx c;
0b70a13d 2302 struct p_header80 *p;
f70af118 2303 int err;
b411b363 2304
841ce241
AG
2305 if (!expect(mdev->bitmap))
2306 return false;
b411b363
PR
2307
2308 /* maybe we should use some per thread scratch page,
2309 * and allocate that during initial device creation? */
0b70a13d 2310 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
b411b363
PR
2311 if (!p) {
2312 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
81e84650 2313 return false;
b411b363
PR
2314 }
2315
2316 if (get_ldev(mdev)) {
2317 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2318 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2319 drbd_bm_set_all(mdev);
2320 if (drbd_bm_write(mdev)) {
2321 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
2322 * but otherwise process as per normal - need to tell other
2323 * side that a full resync is required! */
2324 dev_err(DEV, "Failed to write bitmap to disk!\n");
2325 } else {
2326 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2327 drbd_md_sync(mdev);
2328 }
2329 }
2330 put_ldev(mdev);
2331 }
2332
2333 c = (struct bm_xfer_ctx) {
2334 .bm_bits = drbd_bm_bits(mdev),
2335 .bm_words = drbd_bm_words(mdev),
2336 };
2337
2338 do {
f70af118
AG
2339 err = send_bitmap_rle_or_plain(mdev, p, &c);
2340 } while (err > 0);
b411b363
PR
2341
2342 free_page((unsigned long) p);
f70af118 2343 return err == 0;
b411b363
PR
2344}
2345
2346int drbd_send_bitmap(struct drbd_conf *mdev)
2347{
2348 int err;
2349
2350 if (!drbd_get_data_sock(mdev))
2351 return -1;
2352 err = !_drbd_send_bitmap(mdev);
2353 drbd_put_data_sock(mdev);
2354 return err;
2355}
2356
2357int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2358{
2359 int ok;
2360 struct p_barrier_ack p;
2361
2362 p.barrier = barrier_nr;
2363 p.set_size = cpu_to_be32(set_size);
2364
2365 if (mdev->state.conn < C_CONNECTED)
81e84650 2366 return false;
b411b363 2367 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
0b70a13d 2368 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2369 return ok;
2370}
2371
2372/**
2373 * _drbd_send_ack() - Sends an ack packet
2374 * @mdev: DRBD device.
2375 * @cmd: Packet command code.
2376 * @sector: sector, needs to be in big endian byte order
2377 * @blksize: size in byte, needs to be in big endian byte order
2378 * @block_id: Id, big endian byte order
2379 */
2380static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2381 u64 sector,
2382 u32 blksize,
2383 u64 block_id)
2384{
2385 int ok;
2386 struct p_block_ack p;
2387
2388 p.sector = sector;
2389 p.block_id = block_id;
2390 p.blksize = blksize;
2391 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2392
2393 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
81e84650 2394 return false;
b411b363 2395 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
0b70a13d 2396 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2397 return ok;
2398}
2399
2b2bf214
LE
2400/* dp->sector and dp->block_id already/still in network byte order,
2401 * data_size is payload size according to dp->head,
2402 * and may need to be corrected for digest size. */
b411b363 2403int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2b2bf214 2404 struct p_data *dp, int data_size)
b411b363 2405{
2b2bf214
LE
2406 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2407 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
b411b363
PR
2408 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2409 dp->block_id);
2410}
2411
2412int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2413 struct p_block_req *rp)
2414{
2415 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2416}
2417
2418/**
2419 * drbd_send_ack() - Sends an ack packet
2420 * @mdev: DRBD device.
2421 * @cmd: Packet command code.
2422 * @e: Epoch entry.
2423 */
2424int drbd_send_ack(struct drbd_conf *mdev,
2425 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2426{
2427 return _drbd_send_ack(mdev, cmd,
010f6e67
AG
2428 cpu_to_be64(e->i.sector),
2429 cpu_to_be32(e->i.size),
b411b363
PR
2430 e->block_id);
2431}
2432
2433/* This function misuses the block_id field to signal if the blocks
 2434 * are in sync or not. */
2435int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2436 sector_t sector, int blksize, u64 block_id)
2437{
2438 return _drbd_send_ack(mdev, cmd,
2439 cpu_to_be64(sector),
2440 cpu_to_be32(blksize),
2441 cpu_to_be64(block_id));
2442}
2443
2444int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2445 sector_t sector, int size, u64 block_id)
2446{
2447 int ok;
2448 struct p_block_req p;
2449
2450 p.sector = cpu_to_be64(sector);
2451 p.block_id = block_id;
2452 p.blksize = cpu_to_be32(size);
2453
2454 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
0b70a13d 2455 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2456 return ok;
2457}
2458
2459int drbd_send_drequest_csum(struct drbd_conf *mdev,
2460 sector_t sector, int size,
2461 void *digest, int digest_size,
2462 enum drbd_packets cmd)
2463{
2464 int ok;
2465 struct p_block_req p;
2466
2467 p.sector = cpu_to_be64(sector);
9a8e7753 2468 p.block_id = ID_SYNCER /* unused */;
b411b363
PR
2469 p.blksize = cpu_to_be32(size);
2470
ca9bc12b 2471 p.head.magic = cpu_to_be32(DRBD_MAGIC);
b411b363 2472 p.head.command = cpu_to_be16(cmd);
0b70a13d 2473 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
b411b363
PR
2474
2475 mutex_lock(&mdev->data.mutex);
2476
2477 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2478 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2479
2480 mutex_unlock(&mdev->data.mutex);
2481
2482 return ok;
2483}
2484
2485int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2486{
2487 int ok;
2488 struct p_block_req p;
2489
2490 p.sector = cpu_to_be64(sector);
9a8e7753 2491 p.block_id = ID_SYNCER /* unused */;
b411b363
PR
2492 p.blksize = cpu_to_be32(size);
2493
2494 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
0b70a13d 2495 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2496 return ok;
2497}
2498
2499/* called on sndtimeo
81e84650
AG
2500 * returns false if we should retry,
2501 * true if we think connection is dead
b411b363
PR
2502 */
2503static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2504{
2505 int drop_it;
2506 /* long elapsed = (long)(jiffies - mdev->last_received); */
2507
2508 drop_it = mdev->meta.socket == sock
2509 || !mdev->asender.task
e77a0a5c 2510 || get_t_state(&mdev->asender) != RUNNING
b411b363
PR
2511 || mdev->state.conn < C_CONNECTED;
2512
2513 if (drop_it)
81e84650 2514 return true;
b411b363
PR
2515
2516 drop_it = !--mdev->ko_count;
2517 if (!drop_it) {
2518 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2519 current->comm, current->pid, mdev->ko_count);
2520 request_ping(mdev);
2521 }
2522
2523 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2524}
2525
2526/* The idea of sendpage seems to be to put some kind of reference
2527 * to the page into the skb, and to hand it over to the NIC. In
2528 * this process get_page() gets called.
2529 *
2530 * As soon as the page was really sent over the network put_page()
2531 * gets called by some part of the network layer. [ NIC driver? ]
2532 *
2533 * [ get_page() / put_page() increment/decrement the count. If count
2534 * reaches 0 the page will be freed. ]
2535 *
2536 * This works nicely with pages from FSs.
2537 * But this means that in protocol A we might signal IO completion too early!
2538 *
2539 * In order not to corrupt data during a resync we must make sure
 2540 * that we do not reuse our own buffer pages (EEs) too early, therefore
2541 * we have the net_ee list.
2542 *
2543 * XFS seems to have problems, still, it submits pages with page_count == 0!
2544 * As a workaround, we disable sendpage on pages
2545 * with page_count == 0 or PageSlab.
2546 */
2547static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2548 int offset, size_t size, unsigned msg_flags)
b411b363 2549{
ba11ad9a 2550 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
b411b363
PR
2551 kunmap(page);
2552 if (sent == size)
2553 mdev->send_cnt += size>>9;
2554 return sent == size;
2555}
2556
2557static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2558 int offset, size_t size, unsigned msg_flags)
b411b363
PR
2559{
2560 mm_segment_t oldfs = get_fs();
2561 int sent, ok;
2562 int len = size;
2563
2564 /* e.g. XFS meta- & log-data is in slab pages, which have a
2565 * page_count of 0 and/or have PageSlab() set.
2566 * we cannot use send_page for those, as that does get_page();
2567 * put_page(); and would cause either a VM_BUG directly, or
2568 * __page_cache_release a page that would actually still be referenced
2569 * by someone, leading to some obscure delayed Oops somewhere else. */
2570 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
ba11ad9a 2571 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
b411b363 2572
ba11ad9a 2573 msg_flags |= MSG_NOSIGNAL;
b411b363
PR
2574 drbd_update_congested(mdev);
2575 set_fs(KERNEL_DS);
2576 do {
2577 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2578 offset, len,
ba11ad9a 2579 msg_flags);
b411b363
PR
2580 if (sent == -EAGAIN) {
2581 if (we_should_drop_the_connection(mdev,
2582 mdev->data.socket))
2583 break;
2584 else
2585 continue;
2586 }
2587 if (sent <= 0) {
2588 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2589 __func__, (int)size, len, sent);
2590 break;
2591 }
2592 len -= sent;
2593 offset += sent;
2594 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2595 set_fs(oldfs);
2596 clear_bit(NET_CONGESTED, &mdev->flags);
2597
2598 ok = (len == 0);
2599 if (likely(ok))
2600 mdev->send_cnt += size>>9;
2601 return ok;
2602}
2603
2604static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2605{
2606 struct bio_vec *bvec;
2607 int i;
ba11ad9a 2608 /* hint all but last page with MSG_MORE */
b411b363
PR
2609 __bio_for_each_segment(bvec, bio, i, 0) {
2610 if (!_drbd_no_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2611 bvec->bv_offset, bvec->bv_len,
2612 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2613 return 0;
2614 }
2615 return 1;
2616}
2617
2618static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2619{
2620 struct bio_vec *bvec;
2621 int i;
ba11ad9a 2622 /* hint all but last page with MSG_MORE */
b411b363
PR
2623 __bio_for_each_segment(bvec, bio, i, 0) {
2624 if (!_drbd_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2625 bvec->bv_offset, bvec->bv_len,
2626 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2627 return 0;
2628 }
b411b363
PR
2629 return 1;
2630}
2631
45bb912b
LE
2632static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2633{
2634 struct page *page = e->pages;
010f6e67 2635 unsigned len = e->i.size;
ba11ad9a 2636 /* hint all but last page with MSG_MORE */
45bb912b
LE
2637 page_chain_for_each(page) {
2638 unsigned l = min_t(unsigned, len, PAGE_SIZE);
ba11ad9a
LE
2639 if (!_drbd_send_page(mdev, page, 0, l,
2640 page_chain_next(page) ? MSG_MORE : 0))
45bb912b
LE
2641 return 0;
2642 len -= l;
2643 }
2644 return 1;
2645}
2646
76d2e7ec
PR
2647static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2648{
2649 if (mdev->agreed_pro_version >= 95)
2650 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
76d2e7ec
PR
2651 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2652 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2653 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2654 else
721a9602 2655 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
76d2e7ec
PR
2656}
2657
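/*
 * Worked example (illustrative, not from the original source): a bio
 * submitted with REQ_SYNC | REQ_FUA is announced as DP_RW_SYNC | DP_FUA
 * to a peer with agreed_pro_version >= 95; against an older peer only
 * DP_RW_SYNC survives, so FUA/FLUSH/DISCARD hints are not conveyed on
 * the wire.
 */
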
b411b363
PR
2658/* Used to send write requests
2659 * R_PRIMARY -> Peer (P_DATA)
2660 */
2661int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2662{
2663 int ok = 1;
2664 struct p_data p;
2665 unsigned int dp_flags = 0;
2666 void *dgb;
2667 int dgs;
2668
2669 if (!drbd_get_data_sock(mdev))
2670 return 0;
2671
2672 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2673 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2674
ace652ac 2675 if (req->i.size <= DRBD_MAX_SIZE_H80_PACKET) {
ca9bc12b 2676 p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
0b70a13d
PR
2677 p.head.h80.command = cpu_to_be16(P_DATA);
2678 p.head.h80.length =
ace652ac 2679 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
0b70a13d 2680 } else {
ca9bc12b 2681 p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
0b70a13d
PR
2682 p.head.h95.command = cpu_to_be16(P_DATA);
2683 p.head.h95.length =
ace652ac 2684 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->i.size);
0b70a13d 2685 }
b411b363 2686
ace652ac 2687 p.sector = cpu_to_be64(req->i.sector);
b411b363
PR
2688 p.block_id = (unsigned long)req;
2689 p.seq_num = cpu_to_be32(req->seq_num =
2690 atomic_add_return(1, &mdev->packet_seq));
b411b363 2691
76d2e7ec
PR
2692 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2693
b411b363
PR
2694 if (mdev->state.conn >= C_SYNC_SOURCE &&
2695 mdev->state.conn <= C_PAUSED_SYNC_T)
2696 dp_flags |= DP_MAY_SET_IN_SYNC;
2697
2698 p.dp_flags = cpu_to_be32(dp_flags);
b411b363
PR
2699 set_bit(UNPLUG_REMOTE, &mdev->flags);
2700 ok = (sizeof(p) ==
ba11ad9a 2701 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
b411b363
PR
2702 if (ok && dgs) {
2703 dgb = mdev->int_dig_out;
45bb912b 2704 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
cab2f74b 2705 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2706 }
2707 if (ok) {
470be44a
LE
2708 /* For protocol A, we have to memcpy the payload into
2709 * socket buffers, as we may complete right away
2710 * as soon as we handed it over to tcp, at which point the data
2711 * pages may become invalid.
2712 *
2713 * For data-integrity enabled, we copy it as well, so we can be
2714 * sure that even if the bio pages may still be modified, it
2715 * won't change the data on the wire, thus if the digest checks
2716 * out ok after sending on this side, but does not fit on the
2717 * receiving side, we sure have detected corruption elsewhere.
2718 */
2719 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
b411b363
PR
2720 ok = _drbd_send_bio(mdev, req->master_bio);
2721 else
2722 ok = _drbd_send_zc_bio(mdev, req->master_bio);
470be44a
LE
2723
2724 /* double check digest, sometimes buffers have been modified in flight. */
2725 if (dgs > 0 && dgs <= 64) {
24c4830c 2726 /* 64 byte, 512 bit, is the largest digest size
470be44a
LE
2727 * currently supported in kernel crypto. */
2728 unsigned char digest[64];
2729 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2730 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2731 dev_warn(DEV,
2732 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
ace652ac 2733 (unsigned long long)req->i.sector, req->i.size);
470be44a
LE
2734 }
2735 } /* else if (dgs > 64) {
2736 ... Be noisy about digest too large ...
2737 } */
b411b363
PR
2738 }
2739
2740 drbd_put_data_sock(mdev);
bd26bfc5 2741
b411b363
PR
2742 return ok;
2743}
2744
2745/* answer packet, used to send data back for read requests:
2746 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2747 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2748 */
2749int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2750 struct drbd_epoch_entry *e)
2751{
2752 int ok;
2753 struct p_data p;
2754 void *dgb;
2755 int dgs;
2756
2757 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2758 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2759
010f6e67 2760 if (e->i.size <= DRBD_MAX_SIZE_H80_PACKET) {
ca9bc12b 2761 p.head.h80.magic = cpu_to_be32(DRBD_MAGIC);
0b70a13d
PR
2762 p.head.h80.command = cpu_to_be16(cmd);
2763 p.head.h80.length =
010f6e67 2764 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->i.size);
0b70a13d 2765 } else {
ca9bc12b 2766 p.head.h95.magic = cpu_to_be16(DRBD_MAGIC_BIG);
0b70a13d
PR
2767 p.head.h95.command = cpu_to_be16(cmd);
2768 p.head.h95.length =
010f6e67 2769 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->i.size);
0b70a13d 2770 }
b411b363 2771
010f6e67 2772 p.sector = cpu_to_be64(e->i.sector);
b411b363
PR
2773 p.block_id = e->block_id;
2774 /* p.seq_num = 0; No sequence numbers here.. */
2775
2776 /* Only called by our kernel thread.
2777 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2778 * in response to admin command or module unload.
2779 */
2780 if (!drbd_get_data_sock(mdev))
2781 return 0;
2782
0b70a13d 2783 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
b411b363
PR
2784 if (ok && dgs) {
2785 dgb = mdev->int_dig_out;
45bb912b 2786 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
cab2f74b 2787 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2788 }
2789 if (ok)
45bb912b 2790 ok = _drbd_send_zc_ee(mdev, e);
b411b363
PR
2791
2792 drbd_put_data_sock(mdev);
bd26bfc5 2793
b411b363
PR
2794 return ok;
2795}
2796
73a01a18
PR
2797int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2798{
2799 struct p_block_desc p;
2800
ace652ac
AG
2801 p.sector = cpu_to_be64(req->i.sector);
2802 p.blksize = cpu_to_be32(req->i.size);
73a01a18
PR
2803
2804 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2805}
2806
b411b363
PR
2807/*
2808 drbd_send distinguishes two cases:
2809
2810 Packets sent via the data socket "sock"
2811 and packets sent via the meta data socket "msock"
2812
2813 sock msock
2814 -----------------+-------------------------+------------------------------
2815 timeout conf.timeout / 2 conf.timeout / 2
2816 timeout action send a ping via msock Abort communication
2817 and close all sockets
2818*/
2819
2820/*
2821 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2822 */
2823int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2824 void *buf, size_t size, unsigned msg_flags)
2825{
2826 struct kvec iov;
2827 struct msghdr msg;
2828 int rv, sent = 0;
2829
2830 if (!sock)
2831 return -1000;
2832
2833 /* THINK if (signal_pending) return ... ? */
2834
2835 iov.iov_base = buf;
2836 iov.iov_len = size;
2837
2838 msg.msg_name = NULL;
2839 msg.msg_namelen = 0;
2840 msg.msg_control = NULL;
2841 msg.msg_controllen = 0;
2842 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2843
2844 if (sock == mdev->data.socket) {
2845 mdev->ko_count = mdev->net_conf->ko_count;
2846 drbd_update_congested(mdev);
2847 }
2848 do {
2849 /* STRANGE
 2850	 * tcp_sendmsg does _not_ use its size parameter at all?
2851 *
2852 * -EAGAIN on timeout, -EINTR on signal.
2853 */
2854/* THINK
2855 * do we need to block DRBD_SIG if sock == &meta.socket ??
2856 * otherwise wake_asender() might interrupt some send_*Ack !
2857 */
2858 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2859 if (rv == -EAGAIN) {
2860 if (we_should_drop_the_connection(mdev, sock))
2861 break;
2862 else
2863 continue;
2864 }
2865 D_ASSERT(rv != 0);
2866 if (rv == -EINTR) {
2867 flush_signals(current);
2868 rv = 0;
2869 }
2870 if (rv < 0)
2871 break;
2872 sent += rv;
2873 iov.iov_base += rv;
2874 iov.iov_len -= rv;
2875 } while (sent < size);
2876
2877 if (sock == mdev->data.socket)
2878 clear_bit(NET_CONGESTED, &mdev->flags);
2879
2880 if (rv <= 0) {
2881 if (rv != -EAGAIN) {
2882 dev_err(DEV, "%s_sendmsg returned %d\n",
2883 sock == mdev->meta.socket ? "msock" : "sock",
2884 rv);
2885 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2886 } else
2887 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2888 }
2889
2890 return sent;
2891}
2892
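/*
 * Illustrative note, not part of the original source: drbd_send() returns
 * -1000 when there is no socket, and otherwise the number of bytes that
 * were actually handed to the socket (less than "size" if the connection
 * broke), which is why callers check "drbd_send(...) == size" rather
 * than testing for zero.
 */
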
2893static int drbd_open(struct block_device *bdev, fmode_t mode)
2894{
2895 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2896 unsigned long flags;
2897 int rv = 0;
2898
2a48fc0a 2899 mutex_lock(&drbd_main_mutex);
b411b363
PR
2900 spin_lock_irqsave(&mdev->req_lock, flags);
2901 /* to have a stable mdev->state.role
2902 * and no race with updating open_cnt */
2903
2904 if (mdev->state.role != R_PRIMARY) {
2905 if (mode & FMODE_WRITE)
2906 rv = -EROFS;
2907 else if (!allow_oos)
2908 rv = -EMEDIUMTYPE;
2909 }
2910
2911 if (!rv)
2912 mdev->open_cnt++;
2913 spin_unlock_irqrestore(&mdev->req_lock, flags);
2a48fc0a 2914 mutex_unlock(&drbd_main_mutex);
b411b363
PR
2915
2916 return rv;
2917}
2918
2919static int drbd_release(struct gendisk *gd, fmode_t mode)
2920{
2921 struct drbd_conf *mdev = gd->private_data;
2a48fc0a 2922 mutex_lock(&drbd_main_mutex);
b411b363 2923 mdev->open_cnt--;
2a48fc0a 2924 mutex_unlock(&drbd_main_mutex);
b411b363
PR
2925 return 0;
2926}
2927
b411b363
PR
2928static void drbd_set_defaults(struct drbd_conf *mdev)
2929{
85f4cc17
PR
2930 /* This way we get a compile error when sync_conf grows,
2931 and we forgot to initialize it here */
2932 mdev->sync_conf = (struct syncer_conf) {
2933 /* .rate = */ DRBD_RATE_DEF,
2934 /* .after = */ DRBD_AFTER_DEF,
2935 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
85f4cc17
PR
2936 /* .verify_alg = */ {}, 0,
2937 /* .cpu_mask = */ {}, 0,
2938 /* .csums_alg = */ {}, 0,
e756414f 2939 /* .use_rle = */ 0,
9a31d716
PR
2940 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2941 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2942 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2943 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
0f0601f4
LE
2944 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
2945 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
85f4cc17
PR
2946 };
2947
2948 /* Have to use that way, because the layout differs between
2949 big endian and little endian */
b411b363
PR
2950 mdev->state = (union drbd_state) {
2951 { .role = R_SECONDARY,
2952 .peer = R_UNKNOWN,
2953 .conn = C_STANDALONE,
2954 .disk = D_DISKLESS,
2955 .pdsk = D_UNKNOWN,
fb22c402
PR
2956 .susp = 0,
2957 .susp_nod = 0,
2958 .susp_fen = 0
b411b363
PR
2959 } };
2960}
2961
2962void drbd_init_set_defaults(struct drbd_conf *mdev)
2963{
2964 /* the memset(,0,) did most of this.
2965 * note: only assignments, no allocation in here */
2966
2967 drbd_set_defaults(mdev);
2968
b411b363
PR
2969 atomic_set(&mdev->ap_bio_cnt, 0);
2970 atomic_set(&mdev->ap_pending_cnt, 0);
2971 atomic_set(&mdev->rs_pending_cnt, 0);
2972 atomic_set(&mdev->unacked_cnt, 0);
2973 atomic_set(&mdev->local_cnt, 0);
2974 atomic_set(&mdev->net_cnt, 0);
2975 atomic_set(&mdev->packet_seq, 0);
2976 atomic_set(&mdev->pp_in_use, 0);
435f0740 2977 atomic_set(&mdev->pp_in_use_by_net, 0);
778f271d 2978 atomic_set(&mdev->rs_sect_in, 0);
0f0601f4 2979 atomic_set(&mdev->rs_sect_ev, 0);
759fbdfb 2980 atomic_set(&mdev->ap_in_flight, 0);
b411b363
PR
2981
2982 mutex_init(&mdev->md_io_mutex);
2983 mutex_init(&mdev->data.mutex);
2984 mutex_init(&mdev->meta.mutex);
2985 sema_init(&mdev->data.work.s, 0);
2986 sema_init(&mdev->meta.work.s, 0);
2987 mutex_init(&mdev->state_mutex);
2988
2989 spin_lock_init(&mdev->data.work.q_lock);
2990 spin_lock_init(&mdev->meta.work.q_lock);
2991
2992 spin_lock_init(&mdev->al_lock);
2993 spin_lock_init(&mdev->req_lock);
2994 spin_lock_init(&mdev->peer_seq_lock);
2995 spin_lock_init(&mdev->epoch_lock);
2996
2997 INIT_LIST_HEAD(&mdev->active_ee);
2998 INIT_LIST_HEAD(&mdev->sync_ee);
2999 INIT_LIST_HEAD(&mdev->done_ee);
3000 INIT_LIST_HEAD(&mdev->read_ee);
3001 INIT_LIST_HEAD(&mdev->net_ee);
3002 INIT_LIST_HEAD(&mdev->resync_reads);
3003 INIT_LIST_HEAD(&mdev->data.work.q);
3004 INIT_LIST_HEAD(&mdev->meta.work.q);
3005 INIT_LIST_HEAD(&mdev->resync_work.list);
3006 INIT_LIST_HEAD(&mdev->unplug_work.list);
e9e6f3ec 3007 INIT_LIST_HEAD(&mdev->go_diskless.list);
b411b363 3008 INIT_LIST_HEAD(&mdev->md_sync_work.list);
c4752ef1 3009 INIT_LIST_HEAD(&mdev->start_resync_work.list);
b411b363 3010 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
0ced55a3 3011
794abb75 3012 mdev->resync_work.cb = w_resync_timer;
b411b363 3013 mdev->unplug_work.cb = w_send_write_hint;
e9e6f3ec 3014 mdev->go_diskless.cb = w_go_diskless;
b411b363
PR
3015 mdev->md_sync_work.cb = w_md_sync;
3016 mdev->bm_io_work.w.cb = w_bitmap_io;
370a43e7 3017 mdev->start_resync_work.cb = w_start_resync;
b411b363
PR
3018 init_timer(&mdev->resync_timer);
3019 init_timer(&mdev->md_sync_timer);
370a43e7 3020 init_timer(&mdev->start_resync_timer);
7fde2be9 3021 init_timer(&mdev->request_timer);
b411b363
PR
3022 mdev->resync_timer.function = resync_timer_fn;
3023 mdev->resync_timer.data = (unsigned long) mdev;
3024 mdev->md_sync_timer.function = md_sync_timer_fn;
3025 mdev->md_sync_timer.data = (unsigned long) mdev;
370a43e7
PR
3026 mdev->start_resync_timer.function = start_resync_timer_fn;
3027 mdev->start_resync_timer.data = (unsigned long) mdev;
7fde2be9
PR
3028 mdev->request_timer.function = request_timer_fn;
3029 mdev->request_timer.data = (unsigned long) mdev;
b411b363
PR
3030
3031 init_waitqueue_head(&mdev->misc_wait);
3032 init_waitqueue_head(&mdev->state_wait);
84dfb9f5 3033 init_waitqueue_head(&mdev->net_cnt_wait);
b411b363
PR
3034 init_waitqueue_head(&mdev->ee_wait);
3035 init_waitqueue_head(&mdev->al_wait);
3036 init_waitqueue_head(&mdev->seq_wait);
3037
3038 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3039 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3040 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3041
3042 mdev->agreed_pro_version = PRO_VERSION_MAX;
2451fc3b 3043 mdev->write_ordering = WO_bdev_flush;
b411b363 3044 mdev->resync_wenr = LC_FREE;
99432fcc
PR
3045 mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
3046 mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
b411b363
PR
3047}
3048
3049void drbd_mdev_cleanup(struct drbd_conf *mdev)
3050{
1d7734a0 3051 int i;
e77a0a5c 3052 if (mdev->receiver.t_state != NONE)
b411b363
PR
3053 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3054 mdev->receiver.t_state);
3055
3056 /* no need to lock it, I'm the only thread alive */
3057 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3058 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3059 mdev->al_writ_cnt =
3060 mdev->bm_writ_cnt =
3061 mdev->read_cnt =
3062 mdev->recv_cnt =
3063 mdev->send_cnt =
3064 mdev->writ_cnt =
3065 mdev->p_size =
3066 mdev->rs_start =
3067 mdev->rs_total =
1d7734a0
LE
3068 mdev->rs_failed = 0;
3069 mdev->rs_last_events = 0;
0f0601f4 3070 mdev->rs_last_sect_ev = 0;
1d7734a0
LE
3071 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3072 mdev->rs_mark_left[i] = 0;
3073 mdev->rs_mark_time[i] = 0;
3074 }
b411b363
PR
3075 D_ASSERT(mdev->net_conf == NULL);
3076
3077 drbd_set_my_capacity(mdev, 0);
3078 if (mdev->bitmap) {
3079 /* maybe never allocated. */
02d9a94b 3080 drbd_bm_resize(mdev, 0, 1);
b411b363
PR
3081 drbd_bm_cleanup(mdev);
3082 }
3083
3084 drbd_free_resources(mdev);
0778286a 3085 clear_bit(AL_SUSPENDED, &mdev->flags);
b411b363
PR
3086
3087 /*
3088 * currently we drbd_init_ee only on module load, so
3089 * we may do drbd_release_ee only on module unload!
3090 */
3091 D_ASSERT(list_empty(&mdev->active_ee));
3092 D_ASSERT(list_empty(&mdev->sync_ee));
3093 D_ASSERT(list_empty(&mdev->done_ee));
3094 D_ASSERT(list_empty(&mdev->read_ee));
3095 D_ASSERT(list_empty(&mdev->net_ee));
3096 D_ASSERT(list_empty(&mdev->resync_reads));
3097 D_ASSERT(list_empty(&mdev->data.work.q));
3098 D_ASSERT(list_empty(&mdev->meta.work.q));
3099 D_ASSERT(list_empty(&mdev->resync_work.list));
3100 D_ASSERT(list_empty(&mdev->unplug_work.list));
e9e6f3ec 3101 D_ASSERT(list_empty(&mdev->go_diskless.list));
2265b473
LE
3102
3103 drbd_set_defaults(mdev);
b411b363
PR
3104}
3105
3106
3107static void drbd_destroy_mempools(void)
3108{
3109 struct page *page;
3110
3111 while (drbd_pp_pool) {
3112 page = drbd_pp_pool;
3113 drbd_pp_pool = (struct page *)page_private(page);
3114 __free_page(page);
3115 drbd_pp_vacant--;
3116 }
3117
3118 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3119
3120 if (drbd_ee_mempool)
3121 mempool_destroy(drbd_ee_mempool);
3122 if (drbd_request_mempool)
3123 mempool_destroy(drbd_request_mempool);
3124 if (drbd_ee_cache)
3125 kmem_cache_destroy(drbd_ee_cache);
3126 if (drbd_request_cache)
3127 kmem_cache_destroy(drbd_request_cache);
3128 if (drbd_bm_ext_cache)
3129 kmem_cache_destroy(drbd_bm_ext_cache);
3130 if (drbd_al_ext_cache)
3131 kmem_cache_destroy(drbd_al_ext_cache);
3132
3133 drbd_ee_mempool = NULL;
3134 drbd_request_mempool = NULL;
3135 drbd_ee_cache = NULL;
3136 drbd_request_cache = NULL;
3137 drbd_bm_ext_cache = NULL;
3138 drbd_al_ext_cache = NULL;
3139
3140 return;
3141}
3142
3143static int drbd_create_mempools(void)
3144{
3145 struct page *page;
1816a2b4 3146 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
b411b363
PR
3147 int i;
3148
3149 /* prepare our caches and mempools */
3150 drbd_request_mempool = NULL;
3151 drbd_ee_cache = NULL;
3152 drbd_request_cache = NULL;
3153 drbd_bm_ext_cache = NULL;
3154 drbd_al_ext_cache = NULL;
3155 drbd_pp_pool = NULL;
3156
3157 /* caches */
3158 drbd_request_cache = kmem_cache_create(
3159 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3160 if (drbd_request_cache == NULL)
3161 goto Enomem;
3162
3163 drbd_ee_cache = kmem_cache_create(
3164 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3165 if (drbd_ee_cache == NULL)
3166 goto Enomem;
3167
3168 drbd_bm_ext_cache = kmem_cache_create(
3169 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3170 if (drbd_bm_ext_cache == NULL)
3171 goto Enomem;
3172
3173 drbd_al_ext_cache = kmem_cache_create(
3174 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3175 if (drbd_al_ext_cache == NULL)
3176 goto Enomem;
3177
3178 /* mempools */
3179 drbd_request_mempool = mempool_create(number,
3180 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3181 if (drbd_request_mempool == NULL)
3182 goto Enomem;
3183
3184 drbd_ee_mempool = mempool_create(number,
3185 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2027ae1f 3186 if (drbd_ee_mempool == NULL)
b411b363
PR
3187 goto Enomem;
3188
3189 /* drbd's page pool */
3190 spin_lock_init(&drbd_pp_lock);
3191
3192 for (i = 0; i < number; i++) {
3193 page = alloc_page(GFP_HIGHUSER);
3194 if (!page)
3195 goto Enomem;
3196 set_page_private(page, (unsigned long)drbd_pp_pool);
3197 drbd_pp_pool = page;
3198 }
3199 drbd_pp_vacant = number;
3200
3201 return 0;
3202
3203Enomem:
3204 drbd_destroy_mempools(); /* in case we allocated some */
3205 return -ENOMEM;
3206}
3207
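/*
 * Illustrative sketch, not part of the original source: how a page is
 * popped from the drbd_pp_pool stack built above, mirroring the teardown
 * loop in drbd_destroy_mempools().  The real allocation path additionally
 * takes drbd_pp_lock and can fall back to alloc_page() when the pool is
 * exhausted.
 */
static struct page *example_pp_pop(void)
{
	struct page *page = drbd_pp_pool;

	if (page) {
		drbd_pp_pool = (struct page *)page_private(page);
		set_page_private(page, 0);
		drbd_pp_vacant--;
	}
	return page;
}
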
3208static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3209 void *unused)
3210{
3211 /* just so we have it. you never know what interesting things we
3212 * might want to do here some day...
3213 */
3214
3215 return NOTIFY_DONE;
3216}
3217
3218static struct notifier_block drbd_notifier = {
3219 .notifier_call = drbd_notify_sys,
3220};
3221
3222static void drbd_release_ee_lists(struct drbd_conf *mdev)
3223{
3224 int rr;
3225
3226 rr = drbd_release_ee(mdev, &mdev->active_ee);
3227 if (rr)
3228 dev_err(DEV, "%d EEs in active list found!\n", rr);
3229
3230 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3231 if (rr)
3232 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3233
3234 rr = drbd_release_ee(mdev, &mdev->read_ee);
3235 if (rr)
3236 dev_err(DEV, "%d EEs in read list found!\n", rr);
3237
3238 rr = drbd_release_ee(mdev, &mdev->done_ee);
3239 if (rr)
3240 dev_err(DEV, "%d EEs in done list found!\n", rr);
3241
3242 rr = drbd_release_ee(mdev, &mdev->net_ee);
3243 if (rr)
3244 dev_err(DEV, "%d EEs in net list found!\n", rr);
3245}
3246
3247/* caution. no locking.
3248 * currently only used from module cleanup code. */
3249static void drbd_delete_device(unsigned int minor)
3250{
3251 struct drbd_conf *mdev = minor_to_mdev(minor);
3252
3253 if (!mdev)
3254 return;
3255
3256 /* paranoia asserts */
70dc65e1
AG
3257 D_ASSERT(mdev->open_cnt == 0);
3258 D_ASSERT(list_empty(&mdev->data.work.q));
b411b363
PR
3259 /* end paranoia asserts */
3260
3261 del_gendisk(mdev->vdisk);
3262
3263 /* cleanup stuff that may have been allocated during
3264 * device (re-)configuration or state changes */
3265
3266 if (mdev->this_bdev)
3267 bdput(mdev->this_bdev);
3268
3269 drbd_free_resources(mdev);
3270
3271 drbd_release_ee_lists(mdev);
3272
b411b363
PR
3273 lc_destroy(mdev->act_log);
3274 lc_destroy(mdev->resync);
3275
3276 kfree(mdev->p_uuid);
3277 /* mdev->p_uuid = NULL; */
3278
3279 kfree(mdev->int_dig_out);
3280 kfree(mdev->int_dig_in);
3281 kfree(mdev->int_dig_vv);
3282
3283 /* cleanup the rest that has been
3284 * allocated from drbd_new_device
3285 * and actually free the mdev itself */
3286 drbd_free_mdev(mdev);
3287}
3288
3289static void drbd_cleanup(void)
3290{
3291 unsigned int i;
3292
3293 unregister_reboot_notifier(&drbd_notifier);
3294
17a93f30
LE
3295 /* first remove proc,
 3296	 * drbdsetup uses its presence to detect
3297 * whether DRBD is loaded.
3298 * If we would get stuck in proc removal,
3299 * but have netlink already deregistered,
3300 * some drbdsetup commands may wait forever
3301 * for an answer.
3302 */
3303 if (drbd_proc)
3304 remove_proc_entry("drbd", NULL);
3305
b411b363
PR
3306 drbd_nl_cleanup();
3307
3308 if (minor_table) {
b411b363
PR
3309 i = minor_count;
3310 while (i--)
3311 drbd_delete_device(i);
3312 drbd_destroy_mempools();
3313 }
3314
3315 kfree(minor_table);
3316
3317 unregister_blkdev(DRBD_MAJOR, "drbd");
3318
3319 printk(KERN_INFO "drbd: module cleanup done.\n");
3320}
3321
3322/**
3323 * drbd_congested() - Callback for pdflush
3324 * @congested_data: User data
3325 * @bdi_bits: Bits pdflush is currently interested in
3326 *
3327 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3328 */
3329static int drbd_congested(void *congested_data, int bdi_bits)
3330{
3331 struct drbd_conf *mdev = congested_data;
3332 struct request_queue *q;
3333 char reason = '-';
3334 int r = 0;
3335
1b881ef7 3336 if (!may_inc_ap_bio(mdev)) {
b411b363
PR
3337 /* DRBD has frozen IO */
3338 r = bdi_bits;
3339 reason = 'd';
3340 goto out;
3341 }
3342
3343 if (get_ldev(mdev)) {
3344 q = bdev_get_queue(mdev->ldev->backing_bdev);
3345 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3346 put_ldev(mdev);
3347 if (r)
3348 reason = 'b';
3349 }
3350
3351 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3352 r |= (1 << BDI_async_congested);
3353 reason = reason == 'b' ? 'a' : 'n';
3354 }
3355
3356out:
3357 mdev->congestion_reason = reason;
3358 return r;
3359}
3360
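/*
 * Illustrative summary, not part of the original source: the character
 * stored in mdev->congestion_reason above records why the last callback
 * reported congestion -- 'd' when DRBD itself has frozen IO, 'b' when
 * only the local backing device is congested, 'a' when both the backing
 * device and the network are, 'n' when only the network is, and '-' when
 * nothing is congested.
 */
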
3361struct drbd_conf *drbd_new_device(unsigned int minor)
3362{
3363 struct drbd_conf *mdev;
3364 struct gendisk *disk;
3365 struct request_queue *q;
3366
3367 /* GFP_KERNEL, we are outside of all write-out paths */
3368 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3369 if (!mdev)
3370 return NULL;
3371 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3372 goto out_no_cpumask;
3373
3374 mdev->minor = minor;
3375
3376 drbd_init_set_defaults(mdev);
3377
3378 q = blk_alloc_queue(GFP_KERNEL);
3379 if (!q)
3380 goto out_no_q;
3381 mdev->rq_queue = q;
3382 q->queuedata = mdev;
b411b363
PR
3383
3384 disk = alloc_disk(1);
3385 if (!disk)
3386 goto out_no_disk;
3387 mdev->vdisk = disk;
3388
81e84650 3389 set_disk_ro(disk, true);
b411b363
PR
3390
3391 disk->queue = q;
3392 disk->major = DRBD_MAJOR;
3393 disk->first_minor = minor;
3394 disk->fops = &drbd_ops;
3395 sprintf(disk->disk_name, "drbd%d", minor);
3396 disk->private_data = mdev;
3397
3398 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3399 /* we have no partitions. we contain only ourselves. */
3400 mdev->this_bdev->bd_contains = mdev->this_bdev;
3401
3402 q->backing_dev_info.congested_fn = drbd_congested;
3403 q->backing_dev_info.congested_data = mdev;
3404
2f58dcfc 3405 blk_queue_make_request(q, drbd_make_request);
99432fcc
PR
 3406	/* Setting the max_hw_sectors to an odd value of 8 KiB here
 3407	   triggers a max_bio_size message upon first attach or connect */
3408 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
b411b363
PR
3409 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3410 blk_queue_merge_bvec(q, drbd_merge_bvec);
7eaceacc 3411 q->queue_lock = &mdev->req_lock;
b411b363
PR
3412
3413 mdev->md_io_page = alloc_page(GFP_KERNEL);
3414 if (!mdev->md_io_page)
3415 goto out_no_io_page;
3416
3417 if (drbd_bm_init(mdev))
3418 goto out_no_bitmap;
3419 /* no need to lock access, we are still initializing this minor device. */
3420 if (!tl_init(mdev))
3421 goto out_no_tl;
dac1389c 3422 mdev->read_requests = RB_ROOT;
de696716 3423 mdev->write_requests = RB_ROOT;
8b946255 3424 mdev->epoch_entries = RB_ROOT;
b411b363 3425
b411b363
PR
3426 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3427 if (!mdev->current_epoch)
3428 goto out_no_epoch;
3429
3430 INIT_LIST_HEAD(&mdev->current_epoch->list);
3431 mdev->epochs = 1;
3432
3433 return mdev;
3434
3435/* out_whatever_else:
3436 kfree(mdev->current_epoch); */
3437out_no_epoch:
3438 tl_cleanup(mdev);
3439out_no_tl:
3440 drbd_bm_cleanup(mdev);
3441out_no_bitmap:
3442 __free_page(mdev->md_io_page);
3443out_no_io_page:
3444 put_disk(disk);
3445out_no_disk:
3446 blk_cleanup_queue(q);
3447out_no_q:
3448 free_cpumask_var(mdev->cpu_mask);
3449out_no_cpumask:
3450 kfree(mdev);
3451 return NULL;
3452}
3453
3454/* counterpart of drbd_new_device.
3455 * last part of drbd_delete_device. */
3456void drbd_free_mdev(struct drbd_conf *mdev)
3457{
3458 kfree(mdev->current_epoch);
3459 tl_cleanup(mdev);
3460 if (mdev->bitmap) /* should no longer be there. */
3461 drbd_bm_cleanup(mdev);
3462 __free_page(mdev->md_io_page);
3463 put_disk(mdev->vdisk);
3464 blk_cleanup_queue(mdev->rq_queue);
3465 free_cpumask_var(mdev->cpu_mask);
3466 kfree(mdev);
3467}
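/* Editor's sketch of how drbd_new_device() and drbd_free_mdev() pair up.
 * The real caller is the netlink configuration code, which additionally
 * publishes the device in minor_table and tears down threads and state
 * first; the function names below are hypothetical and that bookkeeping
 * is omitted. */
#if 0
static struct drbd_conf *example_create_minor(unsigned int minor)
{
	struct drbd_conf *mdev = drbd_new_device(minor);

	if (!mdev)
		return NULL;
	/* ... publish mdev in minor_table here ... */
	return mdev;
}

static void example_destroy_minor(struct drbd_conf *mdev)
{
	/* only after the rest of drbd_delete_device() has run */
	drbd_free_mdev(mdev);
}
#endif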
3468
3469
3470int __init drbd_init(void)
3471{
3472 int err;
3473
3474 if (sizeof(struct p_handshake) != 80) {
3475 printk(KERN_ERR
3476 "drbd: never change the size or layout "
3477 "of the HandShake packet.\n");
3478 return -EINVAL;
3479 }
3480
2b8a90b5 3481 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
3482 printk(KERN_ERR
3483 "drbd: invalid minor_count (%d)\n", minor_count);
3484#ifdef MODULE
3485 return -EINVAL;
3486#else
3487 minor_count = 8;
3488#endif
3489 }
3490
3491 err = drbd_nl_init();
3492 if (err)
3493 return err;
3494
3495 err = register_blkdev(DRBD_MAJOR, "drbd");
3496 if (err) {
3497 printk(KERN_ERR
3498 "drbd: unable to register block device major %d\n",
3499 DRBD_MAJOR);
3500 return err;
3501 }
3502
3503 register_reboot_notifier(&drbd_notifier);
3504
3505 /*
3506 * allocate all necessary structs
3507 */
3508 err = -ENOMEM;
3509
3510 init_waitqueue_head(&drbd_pp_wait);
3511
3512 drbd_proc = NULL; /* play safe for drbd_cleanup */
3513 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3514 GFP_KERNEL);
3515 if (!minor_table)
3516 goto Enomem;
3517
3518 err = drbd_create_mempools();
3519 if (err)
3520 goto Enomem;
3521
8c484ee4 3522 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3523 if (!drbd_proc) {
3524 printk(KERN_ERR "drbd: unable to register proc file\n");
3525 goto Enomem;
3526 }
3527
3528 rwlock_init(&global_state_lock);
3529
3530 printk(KERN_INFO "drbd: initialized. "
3531 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3532 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3533 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3534 printk(KERN_INFO "drbd: registered as block device major %d\n",
3535 DRBD_MAJOR);
3536 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3537
3538 return 0; /* Success! */
3539
3540Enomem:
3541 drbd_cleanup();
3542 if (err == -ENOMEM)
3543 /* currently always the case */
3544 printk(KERN_ERR "drbd: ran out of memory\n");
3545 else
3546 printk(KERN_ERR "drbd: initialization failure\n");
3547 return err;
3548}
3549
3550void drbd_free_bc(struct drbd_backing_dev *ldev)
3551{
3552 if (ldev == NULL)
3553 return;
3554
3555 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3556 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3557
3558 kfree(ldev);
3559}
3560
3561void drbd_free_sock(struct drbd_conf *mdev)
3562{
3563 if (mdev->data.socket) {
4589d7f8 3564 mutex_lock(&mdev->data.mutex);
3565 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3566 sock_release(mdev->data.socket);
3567 mdev->data.socket = NULL;
4589d7f8 3568 mutex_unlock(&mdev->data.mutex);
3569 }
3570 if (mdev->meta.socket) {
4589d7f8 3571 mutex_lock(&mdev->meta.mutex);
3572 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3573 sock_release(mdev->meta.socket);
3574 mdev->meta.socket = NULL;
4589d7f8 3575 mutex_unlock(&mdev->meta.mutex);
3576 }
3577}
3578
3579
3580void drbd_free_resources(struct drbd_conf *mdev)
3581{
3582 crypto_free_hash(mdev->csums_tfm);
3583 mdev->csums_tfm = NULL;
3584 crypto_free_hash(mdev->verify_tfm);
3585 mdev->verify_tfm = NULL;
3586 crypto_free_hash(mdev->cram_hmac_tfm);
3587 mdev->cram_hmac_tfm = NULL;
3588 crypto_free_hash(mdev->integrity_w_tfm);
3589 mdev->integrity_w_tfm = NULL;
3590 crypto_free_hash(mdev->integrity_r_tfm);
3591 mdev->integrity_r_tfm = NULL;
3592
3593 drbd_free_sock(mdev);
3594
3595 __no_warn(local,
3596 drbd_free_bc(mdev->ldev);
3597 mdev->ldev = NULL;);
3598}
3599
3600/* meta data management */
3601
3602struct meta_data_on_disk {
3603 u64 la_size; /* last agreed size. */
3604 u64 uuid[UI_SIZE]; /* UUIDs. */
3605 u64 device_uuid;
3606 u64 reserved_u64_1;
3607 u32 flags; /* MDF */
3608 u32 magic;
3609 u32 md_size_sect;
3610 u32 al_offset; /* offset to this block */
3611 u32 al_nr_extents; /* important for restoring the AL */
3612 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3613 u32 bm_offset; /* offset to the bitmap, from here */
3614 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3615 u32 la_peer_max_bio_size; /* last peer max_bio_size */
3616 u32 reserved_u32[3];
3617
3618} __packed;
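/* Editor's sketch (an assumption, not in the original source): since
 * drbd_md_sync() below zeroes and writes a single 512-byte block, a
 * compile-time guard like this would catch accidental growth of the
 * on-disk layout. */
#if 0
static inline void meta_data_on_disk_fits_one_sector(void)
{
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) > 512);
}
#endif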
3619
3620/**
3621 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3622 * @mdev: DRBD device.
3623 */
3624void drbd_md_sync(struct drbd_conf *mdev)
3625{
3626 struct meta_data_on_disk *buffer;
3627 sector_t sector;
3628 int i;
3629
3630 del_timer(&mdev->md_sync_timer);
3631 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3632 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3633 return;
3634
 3635	/* We use D_FAILED here, and not D_ATTACHING, because we try to write
 3636	 * metadata even if we detach due to a disk failure! */
3637 if (!get_ldev_if_state(mdev, D_FAILED))
3638 return;
3639
3640 mutex_lock(&mdev->md_io_mutex);
3641 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3642 memset(buffer, 0, 512);
3643
3644 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3645 for (i = UI_CURRENT; i < UI_SIZE; i++)
3646 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3647 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3648 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3649
3650 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3651 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3652 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3653 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3654 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3655
3656 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
99432fcc 3657 buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
3658
3659 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3660 sector = mdev->ldev->md.md_offset;
3661
3f3a9b84 3662 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
 3663	/* this was only an attempt anyway ... */
3664 dev_err(DEV, "meta data update failed!\n");
81e84650 3665 drbd_chk_io_error(mdev, 1, true);
3666 }
3667
 3668	/* Update mdev->ldev->md.la_size_sect,
 3669	 * since we just wrote it out to the on-disk meta data. */
3670 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3671
3672 mutex_unlock(&mdev->md_io_mutex);
3673 put_ldev(mdev);
3674}
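/* Editor's sketch of the usual caller pattern around drbd_md_sync(): take a
 * local-disk reference, change something that lives in the super block, mark
 * it dirty, then either sync immediately or let the md_sync_timer armed by
 * drbd_md_mark_dirty() take care of it.  The function name is hypothetical. */
#if 0
static void example_md_update(struct drbd_conf *mdev)
{
	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return;

	drbd_md_set_flag(mdev, MDF_FULL_SYNC);	/* some on-disk change */
	drbd_md_mark_dirty(mdev);		/* arms the sync timer */
	drbd_md_sync(mdev);			/* or force it out right away */

	put_ldev(mdev);
}
#endif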
3675
3676/**
3677 * drbd_md_read() - Reads in the meta data super block
3678 * @mdev: DRBD device.
3679 * @bdev: Device from which the meta data should be read in.
3680 *
116676ca 3681 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
3682 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3683 */
3684int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3685{
3686 struct meta_data_on_disk *buffer;
3687 int i, rv = NO_ERROR;
3688
3689 if (!get_ldev_if_state(mdev, D_ATTACHING))
3690 return ERR_IO_MD_DISK;
3691
3692 mutex_lock(&mdev->md_io_mutex);
3693 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3694
3695 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
25985edc 3696 /* NOTE: can't do normal error processing here as this is
3697 called BEFORE disk is attached */
3698 dev_err(DEV, "Error while reading metadata.\n");
3699 rv = ERR_IO_MD_DISK;
3700 goto err;
3701 }
3702
e7fad8af 3703 if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
3704 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3705 rv = ERR_MD_INVALID;
3706 goto err;
3707 }
3708 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3709 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3710 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3711 rv = ERR_MD_INVALID;
3712 goto err;
3713 }
3714 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3715 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3716 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3717 rv = ERR_MD_INVALID;
3718 goto err;
3719 }
3720 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3721 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3722 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3723 rv = ERR_MD_INVALID;
3724 goto err;
3725 }
3726
3727 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3728 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3729 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3730 rv = ERR_MD_INVALID;
3731 goto err;
3732 }
3733
3734 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3735 for (i = UI_CURRENT; i < UI_SIZE; i++)
3736 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3737 bdev->md.flags = be32_to_cpu(buffer->flags);
3738 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3739 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3740
3741 spin_lock_irq(&mdev->req_lock);
3742 if (mdev->state.conn < C_CONNECTED) {
3743 int peer;
3744 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3745 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
3746 mdev->peer_max_bio_size = peer;
3747 }
3748 spin_unlock_irq(&mdev->req_lock);
3749
3750 if (mdev->sync_conf.al_extents < 7)
3751 mdev->sync_conf.al_extents = 127;
3752
3753 err:
3754 mutex_unlock(&mdev->md_io_mutex);
3755 put_ldev(mdev);
3756
3757 return rv;
3758}
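/* Editor's sketch of how the attach path (in drbd_nl.c) might consume
 * drbd_md_read()'s return value; the function name and the surrounding
 * error handling are hypothetical. */
#if 0
static int example_attach_step(struct drbd_conf *mdev,
			       struct drbd_backing_dev *nbc)
{
	int retcode = drbd_md_read(mdev, nbc);

	if (retcode != NO_ERROR)
		return retcode;	/* ERR_IO_MD_DISK or ERR_MD_INVALID */

	/* meta data looks plausible, continue attaching ... */
	return NO_ERROR;
}
#endif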
3759
3760/**
3761 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3762 * @mdev: DRBD device.
3763 *
3764 * Call this function if you change anything that should be written to
 3765	 * the meta-data super block. This function sets MD_DIRTY and arms a
 3766	 * timer that makes sure drbd_md_sync() gets called within five seconds.
3767 */
ca0e6098 3768#ifdef DEBUG
3769void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3770{
3771 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3772 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3773 mdev->last_md_mark_dirty.line = line;
3774 mdev->last_md_mark_dirty.func = func;
3775 }
3776}
3777#else
3778void drbd_md_mark_dirty(struct drbd_conf *mdev)
3779{
ee15b038 3780 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
ca0e6098 3781 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
b411b363 3782}
ee15b038 3783#endif
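/* Editor's note: with DEBUG set, callers are presumably routed to the
 * three-argument variant via a macro in drbd_int.h, roughly as sketched
 * below.  This is an assumption for illustration, not a quote of the
 * header. */
#if 0
#ifdef DEBUG
#define drbd_md_mark_dirty(m)	drbd_md_mark_dirty_(m, __LINE__, __func__)
#endif
#endif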
3784
3785static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3786{
3787 int i;
3788
62b0da3a 3789 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
b411b363 3790 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3791}
3792
3793void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3794{
3795 if (idx == UI_CURRENT) {
3796 if (mdev->state.role == R_PRIMARY)
3797 val |= 1;
3798 else
3799 val &= ~((u64)1);
3800
3801 drbd_set_ed_uuid(mdev, val);
3802 }
3803
3804 mdev->ldev->md.uuid[idx] = val;
3805 drbd_md_mark_dirty(mdev);
3806}
3807
3808
3809void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3810{
3811 if (mdev->ldev->md.uuid[idx]) {
3812 drbd_uuid_move_history(mdev);
3813 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3814 }
3815 _drbd_uuid_set(mdev, idx, val);
3816}
3817
3818/**
3819 * drbd_uuid_new_current() - Creates a new current UUID
3820 * @mdev: DRBD device.
3821 *
3822 * Creates a new current UUID, and rotates the old current UUID into
3823 * the bitmap slot. Causes an incremental resync upon next connect.
3824 */
3825void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3826{
3827 u64 val;
3828 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3829
3830 if (bm_uuid)
3831 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 3832
b411b363 3833 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3834
3835 get_random_bytes(&val, sizeof(u64));
3836 _drbd_uuid_set(mdev, UI_CURRENT, val);
62b0da3a 3837 drbd_print_uuids(mdev, "new current UUID");
3838 /* get it to stable storage _now_ */
3839 drbd_md_sync(mdev);
3840}
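/* Editor's sketch of a plausible drbd_uuid_new_current() call site: rotate
 * in a fresh current UUID when the node becomes primary without a connected
 * peer, so the peer does an incremental resync later.  The function name and
 * the exact condition are illustrative assumptions. */
#if 0
static void example_promote_without_peer(struct drbd_conf *mdev)
{
	if (!get_ldev(mdev))
		return;

	if (mdev->state.role == R_PRIMARY && mdev->state.conn < C_CONNECTED)
		drbd_uuid_new_current(mdev);

	put_ldev(mdev);
}
#endif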
3841
3842void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3843{
3844 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3845 return;
3846
3847 if (val == 0) {
3848 drbd_uuid_move_history(mdev);
3849 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3850 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 3851 } else {
3852 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3853 if (bm_uuid)
3854 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 3855
62b0da3a 3856 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3857 }
3858 drbd_md_mark_dirty(mdev);
3859}
3860
3861/**
3862 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3863 * @mdev: DRBD device.
3864 *
3865 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3866 */
3867int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3868{
3869 int rv = -EIO;
3870
3871 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3872 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3873 drbd_md_sync(mdev);
3874 drbd_bm_set_all(mdev);
3875
3876 rv = drbd_bm_write(mdev);
3877
3878 if (!rv) {
3879 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3880 drbd_md_sync(mdev);
3881 }
3882
3883 put_ldev(mdev);
3884 }
3885
3886 return rv;
3887}
3888
3889/**
3890 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3891 * @mdev: DRBD device.
3892 *
3893 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3894 */
3895int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3896{
3897 int rv = -EIO;
3898
0778286a 3899 drbd_resume_al(mdev);
3900 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3901 drbd_bm_clear_all(mdev);
3902 rv = drbd_bm_write(mdev);
3903 put_ldev(mdev);
3904 }
3905
3906 return rv;
3907}
3908
3909static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3910{
3911 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
02851e9f 3912 int rv = -EIO;
3913
3914 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3915
02851e9f 3916 if (get_ldev(mdev)) {
20ceb2b2 3917 drbd_bm_lock(mdev, work->why, work->flags);
3918 rv = work->io_fn(mdev);
3919 drbd_bm_unlock(mdev);
3920 put_ldev(mdev);
3921 }
3922
3923 clear_bit(BITMAP_IO, &mdev->flags);
127b3178 3924 smp_mb__after_clear_bit();
3925 wake_up(&mdev->misc_wait);
3926
3927 if (work->done)
3928 work->done(mdev, rv);
3929
3930 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3931 work->why = NULL;
20ceb2b2 3932 work->flags = 0;
3933
3934 return 1;
3935}
3936
3937void drbd_ldev_destroy(struct drbd_conf *mdev)
3938{
3939 lc_destroy(mdev->resync);
3940 mdev->resync = NULL;
3941 lc_destroy(mdev->act_log);
3942 mdev->act_log = NULL;
3943 __no_warn(local,
3944 drbd_free_bc(mdev->ldev);
3945 mdev->ldev = NULL;);
3946
3947 if (mdev->md_io_tmpp) {
3948 __free_page(mdev->md_io_tmpp);
3949 mdev->md_io_tmpp = NULL;
3950 }
3951 clear_bit(GO_DISKLESS, &mdev->flags);
3952}
3953
3954static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3955{
3956 D_ASSERT(mdev->state.disk == D_FAILED);
3957 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3958 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3959 * the protected members anymore, though, so once put_ldev reaches zero
3960 * again, it will be safe to free them. */
e9e6f3ec 3961 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3962 return 1;
3963}
3964
3965void drbd_go_diskless(struct drbd_conf *mdev)
3966{
3967 D_ASSERT(mdev->state.disk == D_FAILED);
3968 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
9d282875 3969 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3970}
3971
3972/**
3973 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3974 * @mdev: DRBD device.
3975 * @io_fn: IO callback to be called when bitmap IO is possible
3976 * @done: callback to be called after the bitmap IO was performed
3977 * @why: Descriptive text of the reason for doing the IO
3978 *
 3979	 * While IO on the bitmap is in flight we freeze application IO, which
 3980	 * ensures that drbd_set_out_of_sync() can not be called. This function MAY
 3981	 * ONLY be called from worker context. It MUST NOT be used while a previous
 3982	 * such work is still pending!
3983 */
3984void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3985 int (*io_fn)(struct drbd_conf *),
3986 void (*done)(struct drbd_conf *, int),
20ceb2b2 3987 char *why, enum bm_flag flags)
3988{
3989 D_ASSERT(current == mdev->worker.task);
3990
3991 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3992 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3993 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3994 if (mdev->bm_io_work.why)
3995 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3996 why, mdev->bm_io_work.why);
3997
3998 mdev->bm_io_work.io_fn = io_fn;
3999 mdev->bm_io_work.done = done;
4000 mdev->bm_io_work.why = why;
20ceb2b2 4001 mdev->bm_io_work.flags = flags;
b411b363 4002
22afd7ee 4003 spin_lock_irq(&mdev->req_lock);
4004 set_bit(BITMAP_IO, &mdev->flags);
4005 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
127b3178 4006 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
b411b363 4007 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
b411b363 4008 }
22afd7ee 4009 spin_unlock_irq(&mdev->req_lock);
4010}
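/* Editor's sketch of using drbd_queue_bitmap_io() from worker context with
 * one of the io_fn helpers above; the done callback, the reason string and
 * the BM_LOCKED_MASK flag choice are illustrative assumptions. */
#if 0
static void example_done(struct drbd_conf *mdev, int rv)
{
	if (rv)
		dev_err(DEV, "bitmap write-out failed\n");
}

static void example_queue_full_sync(struct drbd_conf *mdev)
{
	/* worker context only, and no other bitmap work may be pending */
	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, example_done,
			     "example: set all bits", BM_LOCKED_MASK);
}
#endif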
4011
4012/**
4013 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4014 * @mdev: DRBD device.
4015 * @io_fn: IO callback to be called when bitmap IO is possible
4016 * @why: Descriptive text of the reason for doing the IO
4017 *
 4018	 * Freezes application IO while the actual IO operation runs. This
 4019	 * function MAY NOT be called from worker context.
4020 */
4021int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4022 char *why, enum bm_flag flags)
4023{
4024 int rv;
4025
4026 D_ASSERT(current != mdev->worker.task);
4027
4028 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4029 drbd_suspend_io(mdev);
b411b363 4030
20ceb2b2 4031 drbd_bm_lock(mdev, why, flags);
4032 rv = io_fn(mdev);
4033 drbd_bm_unlock(mdev);
4034
4035 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4036 drbd_resume_io(mdev);
4037
4038 return rv;
4039}
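/* Editor's sketch of the synchronous variant: drbd_bitmap_io() may not be
 * called from the worker and suspends application IO around io_fn unless
 * BM_LOCKED_SET_ALLOWED is passed.  Name and flag choice are assumptions. */
#if 0
static int example_full_sync_now(struct drbd_conf *mdev)
{
	return drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			      "example: set all bits", BM_LOCKED_MASK);
}
#endif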
4040
4041void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4042{
4043 if ((mdev->ldev->md.flags & flag) != flag) {
4044 drbd_md_mark_dirty(mdev);
4045 mdev->ldev->md.flags |= flag;
4046 }
4047}
4048
4049void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4050{
4051 if ((mdev->ldev->md.flags & flag) != 0) {
4052 drbd_md_mark_dirty(mdev);
4053 mdev->ldev->md.flags &= ~flag;
4054 }
4055}
4056int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4057{
4058 return (bdev->md.flags & flag) != 0;
4059}
4060
4061static void md_sync_timer_fn(unsigned long data)
4062{
4063 struct drbd_conf *mdev = (struct drbd_conf *) data;
4064
4065 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4066}
4067
4068static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4069{
4070 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4071#ifdef DEBUG
4072 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4073 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4074#endif
b411b363 4075 drbd_md_sync(mdev);
4076 return 1;
4077}
4078
4079#ifdef CONFIG_DRBD_FAULT_INJECTION
4080/* Fault insertion support including random number generator shamelessly
4081 * stolen from kernel/rcutorture.c */
4082struct fault_random_state {
4083 unsigned long state;
4084 unsigned long count;
4085};
4086
4087#define FAULT_RANDOM_MULT 39916801 /* prime */
4088#define FAULT_RANDOM_ADD 479001701 /* prime */
4089#define FAULT_RANDOM_REFRESH 10000
4090
4091/*
4092 * Crude but fast random-number generator. Uses a linear congruential
4093 * generator, with occasional help from get_random_bytes().
4094 */
4095static unsigned long
4096_drbd_fault_random(struct fault_random_state *rsp)
4097{
4098 long refresh;
4099
49829ea7 4100 if (!rsp->count--) {
4101 get_random_bytes(&refresh, sizeof(refresh));
4102 rsp->state += refresh;
4103 rsp->count = FAULT_RANDOM_REFRESH;
4104 }
4105 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4106 return swahw32(rsp->state);
4107}
4108
4109static char *
4110_drbd_fault_str(unsigned int type) {
4111 static char *_faults[] = {
4112 [DRBD_FAULT_MD_WR] = "Meta-data write",
4113 [DRBD_FAULT_MD_RD] = "Meta-data read",
4114 [DRBD_FAULT_RS_WR] = "Resync write",
4115 [DRBD_FAULT_RS_RD] = "Resync read",
4116 [DRBD_FAULT_DT_WR] = "Data write",
4117 [DRBD_FAULT_DT_RD] = "Data read",
4118 [DRBD_FAULT_DT_RA] = "Data read ahead",
4119 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4120 [DRBD_FAULT_AL_EE] = "EE allocation",
4121 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4122 };
4123
4124 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4125}
4126
4127unsigned int
4128_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4129{
4130 static struct fault_random_state rrs = {0, 0};
4131
4132 unsigned int ret = (
4133 (fault_devs == 0 ||
4134 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4135 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4136
4137 if (ret) {
4138 fault_count++;
4139
7383506c 4140 if (__ratelimit(&drbd_ratelimit_state))
4141 dev_warn(DEV, "***Simulating %s failure\n",
4142 _drbd_fault_str(type));
4143 }
4144
4145 return ret;
4146}
4147#endif
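/* Editor's sketch of a fault-injection call site, assuming the usual
 * drbd_insert_fault() wrapper from drbd_int.h around the helper above;
 * the function name and the bio handling are illustrative. */
#if 0
static void example_submit_with_fault(struct drbd_conf *mdev,
				      int rw, struct bio *bio)
{
	if (drbd_insert_fault(mdev, DRBD_FAULT_DT_WR))
		bio_endio(bio, -EIO);	/* pretend the data write failed */
	else
		submit_bio(rw, bio);
}
#endif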
4148
4149const char *drbd_buildtag(void)
4150{
 4151	/* When DRBD is built from external sources, this holds a reference to
 4152	   the git hash of that source code. */
4153
4154 static char buildtag[38] = "\0uilt-in";
4155
4156 if (buildtag[0] == 0) {
4157#ifdef CONFIG_MODULES
4158 if (THIS_MODULE != NULL)
4159 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4160 else
4161#endif
4162 buildtag[0] = 'b';
4163 }
4164
4165 return buildtag;
4166}
4167
4168module_init(drbd_init)
4169module_exit(drbd_cleanup)
4170
4171EXPORT_SYMBOL(drbd_conn_str);
4172EXPORT_SYMBOL(drbd_role_str);
4173EXPORT_SYMBOL(drbd_disk_str);
4174EXPORT_SYMBOL(drbd_set_st_err_str);