drbd: Fixed state transitions after async outdate-peer-handler returned
drivers/block/drbd/drbd_main.c
1/*
2 drbd.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
12
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
16 any later version.
17
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26
27 */
28
 29#include <linux/module.h>
30#include <linux/drbd.h>
31#include <asm/uaccess.h>
32#include <asm/types.h>
33#include <net/sock.h>
34#include <linux/ctype.h>
 35#include <linux/mutex.h>
36#include <linux/fs.h>
37#include <linux/file.h>
38#include <linux/proc_fs.h>
39#include <linux/init.h>
40#include <linux/mm.h>
41#include <linux/memcontrol.h>
42#include <linux/mm_inline.h>
43#include <linux/slab.h>
44#include <linux/random.h>
45#include <linux/reboot.h>
46#include <linux/notifier.h>
47#include <linux/kthread.h>
48
49#define __KERNEL_SYSCALLS__
50#include <linux/unistd.h>
51#include <linux/vmalloc.h>
52
53#include <linux/drbd_limits.h>
54#include "drbd_int.h"
55#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56
57#include "drbd_vli.h"
58
59struct after_state_chg_work {
60 struct drbd_work w;
61 union drbd_state os;
62 union drbd_state ns;
63 enum chg_state_flags flags;
64 struct completion *done;
65};
66
 67static DEFINE_MUTEX(drbd_main_mutex);
68int drbdd_init(struct drbd_thread *);
69int drbd_worker(struct drbd_thread *);
70int drbd_asender(struct drbd_thread *);
71
72int drbd_init(void);
73static int drbd_open(struct block_device *bdev, fmode_t mode);
74static int drbd_release(struct gendisk *gd, fmode_t mode);
75static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
76static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
77 union drbd_state ns, enum chg_state_flags flags);
78static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
79static void md_sync_timer_fn(unsigned long data);
80static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
 81static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
 82
83MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
84 "Lars Ellenberg <lars@linbit.com>");
85MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
86MODULE_VERSION(REL_VERSION);
87MODULE_LICENSE("GPL");
88MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices ("
89 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
90MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
91
92#include <linux/moduleparam.h>
93/* allow_open_on_secondary */
94MODULE_PARM_DESC(allow_oos, "DONT USE!");
95/* thanks to these macros, if compiled into the kernel (not-module),
96 * this becomes the boot parameter drbd.minor_count */
97module_param(minor_count, uint, 0444);
98module_param(disable_sendpage, bool, 0644);
99module_param(allow_oos, bool, 0);
100module_param(cn_idx, uint, 0444);
101module_param(proc_details, int, 0644);
102
103#ifdef CONFIG_DRBD_FAULT_INJECTION
104int enable_faults;
105int fault_rate;
106static int fault_count;
107int fault_devs;
108/* bitmap of enabled faults */
109module_param(enable_faults, int, 0664);
110/* fault rate % value - applies to all enabled faults */
111module_param(fault_rate, int, 0664);
112/* count of faults inserted */
113module_param(fault_count, int, 0664);
114/* bitmap of devices to insert faults on */
115module_param(fault_devs, int, 0644);
116#endif
117
118/* module parameter, defined */
 119unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
120int disable_sendpage;
121int allow_oos;
122unsigned int cn_idx = CN_IDX_DRBD;
123int proc_details; /* Detail level in proc drbd*/
124
125/* Module parameter for setting the user mode helper program
126 * to run. Default is /sbin/drbdadm */
127char usermode_helper[80] = "/sbin/drbdadm";
128
129module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
130
131/* in 2.6.x, our device mapping and config info contains our virtual gendisks
132 * as member "struct gendisk *vdisk;"
133 */
134struct drbd_conf **minor_table;
135
136struct kmem_cache *drbd_request_cache;
137struct kmem_cache *drbd_ee_cache; /* epoch entries */
138struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
139struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
140mempool_t *drbd_request_mempool;
141mempool_t *drbd_ee_mempool;
142
143/* I do not use a standard mempool, because:
144 1) I want to hand out the pre-allocated objects first.
145 2) I want to be able to interrupt sleeping allocation with a signal.
146 Note: This is a single linked list, the next pointer is the private
147 member of struct page.
148 */
149struct page *drbd_pp_pool;
150spinlock_t drbd_pp_lock;
151int drbd_pp_vacant;
152wait_queue_head_t drbd_pp_wait;
153
154DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
155
 156static const struct block_device_operations drbd_ops = {
157 .owner = THIS_MODULE,
158 .open = drbd_open,
159 .release = drbd_release,
160};
161
162#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
163
164#ifdef __CHECKER__
165/* When checking with sparse, and this is an inline function, sparse will
 166 give tons of false positives. When this is a real function sparse works.
167 */
168int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
169{
170 int io_allowed;
171
172 atomic_inc(&mdev->local_cnt);
173 io_allowed = (mdev->state.disk >= mins);
174 if (!io_allowed) {
175 if (atomic_dec_and_test(&mdev->local_cnt))
176 wake_up(&mdev->misc_wait);
177 }
178 return io_allowed;
179}
180
181#endif
182
183/**
184 * DOC: The transfer log
185 *
186 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
187 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
188 * of the list. There is always at least one &struct drbd_tl_epoch object.
189 *
190 * Each &struct drbd_tl_epoch has a circular double linked list of requests
191 * attached.
 192 */
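/* tl_init(): allocate and initialize the first, empty transfer log epoch
 * object (barrier number 4711) and reset the tl_hash bookkeeping; both
 * oldest_tle and newest_tle point to this single epoch afterwards. */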
193static int tl_init(struct drbd_conf *mdev)
194{
195 struct drbd_tl_epoch *b;
196
197 /* during device minor initialization, we may well use GFP_KERNEL */
198 b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
199 if (!b)
200 return 0;
201 INIT_LIST_HEAD(&b->requests);
202 INIT_LIST_HEAD(&b->w.list);
203 b->next = NULL;
204 b->br_number = 4711;
 205 b->n_writes = 0;
206 b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
207
208 mdev->oldest_tle = b;
209 mdev->newest_tle = b;
210 INIT_LIST_HEAD(&mdev->out_of_sequence_requests);
211
212 mdev->tl_hash = NULL;
213 mdev->tl_hash_s = 0;
214
215 return 1;
216}
217
218static void tl_cleanup(struct drbd_conf *mdev)
219{
220 D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
221 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
222 kfree(mdev->oldest_tle);
223 mdev->oldest_tle = NULL;
224 kfree(mdev->unused_spare_tle);
225 mdev->unused_spare_tle = NULL;
226 kfree(mdev->tl_hash);
227 mdev->tl_hash = NULL;
228 mdev->tl_hash_s = 0;
229}
230
231/**
232 * _tl_add_barrier() - Adds a barrier to the transfer log
233 * @mdev: DRBD device.
234 * @new: Barrier to be added before the current head of the TL.
235 *
236 * The caller must hold the req_lock.
237 */
238void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
239{
240 struct drbd_tl_epoch *newest_before;
241
242 INIT_LIST_HEAD(&new->requests);
243 INIT_LIST_HEAD(&new->w.list);
244 new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
245 new->next = NULL;
 246 new->n_writes = 0;
247
248 newest_before = mdev->newest_tle;
249 /* never send a barrier number == 0, because that is special-cased
250 * when using TCQ for our write ordering code */
251 new->br_number = (newest_before->br_number+1) ?: 1;
252 if (mdev->newest_tle != new) {
253 mdev->newest_tle->next = new;
254 mdev->newest_tle = new;
255 }
256}
257
258/**
259 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
260 * @mdev: DRBD device.
261 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
262 * @set_size: Expected number of requests before that barrier.
263 *
264 * In case the passed barrier_nr or set_size does not match the oldest
265 * &struct drbd_tl_epoch objects this function will cause a termination
266 * of the connection.
267 */
268void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
269 unsigned int set_size)
270{
271 struct drbd_tl_epoch *b, *nob; /* next old barrier */
272 struct list_head *le, *tle;
273 struct drbd_request *r;
274
275 spin_lock_irq(&mdev->req_lock);
276
277 b = mdev->oldest_tle;
278
279 /* first some paranoia code */
280 if (b == NULL) {
281 dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
282 barrier_nr);
283 goto bail;
284 }
285 if (b->br_number != barrier_nr) {
286 dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
287 barrier_nr, b->br_number);
288 goto bail;
289 }
290 if (b->n_writes != set_size) {
291 dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
292 barrier_nr, set_size, b->n_writes);
293 goto bail;
294 }
295
296 /* Clean up list of requests processed during current epoch */
297 list_for_each_safe(le, tle, &b->requests) {
298 r = list_entry(le, struct drbd_request, tl_requests);
299 _req_mod(r, barrier_acked);
300 }
301 /* There could be requests on the list waiting for completion
302 of the write to the local disk. To avoid corruptions of
303 slab's data structures we have to remove the lists head.
304
305 Also there could have been a barrier ack out of sequence, overtaking
306 the write acks - which would be a bug and violating write ordering.
307 To not deadlock in case we lose connection while such requests are
308 still pending, we need some way to find them for the
309 _req_mode(connection_lost_while_pending).
310
311 These have been list_move'd to the out_of_sequence_requests list in
312 _req_mod(, barrier_acked) above.
313 */
314 list_del_init(&b->requests);
315
316 nob = b->next;
317 if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
318 _tl_add_barrier(mdev, b);
319 if (nob)
320 mdev->oldest_tle = nob;
321 /* if nob == NULL b was the only barrier, and becomes the new
322 barrier. Therefore mdev->oldest_tle points already to b */
323 } else {
324 D_ASSERT(nob != NULL);
325 mdev->oldest_tle = nob;
326 kfree(b);
327 }
328
329 spin_unlock_irq(&mdev->req_lock);
330 dec_ap_pending(mdev);
331
332 return;
333
334bail:
335 spin_unlock_irq(&mdev->req_lock);
336 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
337}
338
 339
 340/**
 341 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 342 * @mdev: DRBD device.
 343 * @what: The action/event to perform with all request objects
 344 *
345 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
346 * restart_frozen_disk_io.
 347 */
 348static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
 349{
 350 struct drbd_tl_epoch *b, *tmp, **pn;
 351 struct list_head *le, *tle, carry_reads;
352 struct drbd_request *req;
353 int rv, n_writes, n_reads;
354
355 b = mdev->oldest_tle;
 356 pn = &mdev->oldest_tle;
 357 while (b) {
358 n_writes = 0;
359 n_reads = 0;
 360 INIT_LIST_HEAD(&carry_reads);
 361 list_for_each_safe(le, tle, &b->requests) {
362 req = list_entry(le, struct drbd_request, tl_requests);
363 rv = _req_mod(req, what);
364
365 n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
366 n_reads += (rv & MR_READ) >> MR_READ_SHIFT;
367 }
368 tmp = b->next;
369
 370 if (n_writes) {
371 if (what == resend) {
372 b->n_writes = n_writes;
373 if (b->w.cb == NULL) {
374 b->w.cb = w_send_barrier;
375 inc_ap_pending(mdev);
376 set_bit(CREATE_BARRIER, &mdev->flags);
377 }
378
379 drbd_queue_work(&mdev->data.work, &b->w);
380 }
381 pn = &b->next;
382 } else {
383 if (n_reads)
384 list_add(&carry_reads, &b->requests);
385 /* there could still be requests on that ring list,
386 * in case local io is still pending */
387 list_del(&b->requests);
388
389 /* dec_ap_pending corresponding to queue_barrier.
390 * the newest barrier may not have been queued yet,
391 * in which case w.cb is still NULL. */
392 if (b->w.cb != NULL)
393 dec_ap_pending(mdev);
394
395 if (b == mdev->newest_tle) {
396 /* recycle, but reinit! */
397 D_ASSERT(tmp == NULL);
398 INIT_LIST_HEAD(&b->requests);
 399 list_splice(&carry_reads, &b->requests);
400 INIT_LIST_HEAD(&b->w.list);
401 b->w.cb = NULL;
402 b->br_number = net_random();
403 b->n_writes = 0;
404
405 *pn = b;
406 break;
407 }
408 *pn = tmp;
409 kfree(b);
 410 }
 411 b = tmp;
 412 list_splice(&carry_reads, &b->requests);
 413 }
414}
415
416
417/**
418 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
419 * @mdev: DRBD device.
420 *
421 * This is called after the connection to the peer was lost. The storage covered
 422 * by the requests on the transfer gets marked as out of sync. Called from the
423 * receiver thread and the worker thread.
424 */
425void tl_clear(struct drbd_conf *mdev)
426{
427 struct list_head *le, *tle;
428 struct drbd_request *r;
429
430 spin_lock_irq(&mdev->req_lock);
431
 432 _tl_restart(mdev, connection_lost_while_pending);
433
434 /* we expect this list to be empty. */
435 D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
436
437 /* but just in case, clean it up anyways! */
438 list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
439 r = list_entry(le, struct drbd_request, tl_requests);
440 /* It would be nice to complete outside of spinlock.
441 * But this is easier for now. */
442 _req_mod(r, connection_lost_while_pending);
443 }
444
445 /* ensure bit indicating barrier is required is clear */
446 clear_bit(CREATE_BARRIER, &mdev->flags);
447
448 memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
449
450 spin_unlock_irq(&mdev->req_lock);
451}
452
453void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
454{
455 spin_lock_irq(&mdev->req_lock);
456 _tl_restart(mdev, what);
457 spin_unlock_irq(&mdev->req_lock);
458}
459
460/**
 461 * cl_wide_st_chg() - true if the state change is a cluster wide one
462 * @mdev: DRBD device.
463 * @os: old (current) state.
464 * @ns: new (wanted) state.
465 */
466static int cl_wide_st_chg(struct drbd_conf *mdev,
467 union drbd_state os, union drbd_state ns)
468{
469 return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
470 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
471 (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
472 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
473 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
474 (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
475 (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
476}
477
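/**
 * drbd_change_state() - Change the state bits given by @mask to @val
 * @mdev:	DRBD device.
 * @f:		state change flags, see enum chg_state_flags.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 *
 * Thin wrapper that computes the masked new state under req_lock and
 * hands it to _drbd_set_state().
 */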
478enum drbd_state_rv
479drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
480 union drbd_state mask, union drbd_state val)
481{
482 unsigned long flags;
483 union drbd_state os, ns;
 484 enum drbd_state_rv rv;
485
486 spin_lock_irqsave(&mdev->req_lock, flags);
487 os = mdev->state;
488 ns.i = (os.i & ~mask.i) | val.i;
489 rv = _drbd_set_state(mdev, ns, f, NULL);
490 ns = mdev->state;
491 spin_unlock_irqrestore(&mdev->req_lock, flags);
492
493 return rv;
494}
495
496/**
497 * drbd_force_state() - Impose a change which happens outside our control on our state
498 * @mdev: DRBD device.
499 * @mask: mask of state bits to change.
500 * @val: value of new state bits.
501 */
502void drbd_force_state(struct drbd_conf *mdev,
503 union drbd_state mask, union drbd_state val)
504{
505 drbd_change_state(mdev, CS_HARD, mask, val);
506}
507
508static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
509static enum drbd_state_rv is_valid_state_transition(struct drbd_conf *,
510 union drbd_state,
511 union drbd_state);
 512static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
 513 union drbd_state ns, const char **warn_sync_abort);
514int drbd_send_state_req(struct drbd_conf *,
515 union drbd_state, union drbd_state);
516
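/* Wait condition used by drbd_req_state() for a cluster wide state change:
 * SS_UNKNOWN_ERROR means the peer's answer is still outstanding (keep
 * waiting), SS_CW_NO_NEED means no cluster wide change is necessary,
 * anything else is the final result. */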
517static enum drbd_state_rv
518_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
519 union drbd_state val)
520{
521 union drbd_state os, ns;
522 unsigned long flags;
 523 enum drbd_state_rv rv;
524
525 if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
526 return SS_CW_SUCCESS;
527
528 if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
529 return SS_CW_FAILED_BY_PEER;
530
531 rv = 0;
532 spin_lock_irqsave(&mdev->req_lock, flags);
533 os = mdev->state;
534 ns.i = (os.i & ~mask.i) | val.i;
535 ns = sanitize_state(mdev, os, ns, NULL);
536
537 if (!cl_wide_st_chg(mdev, os, ns))
538 rv = SS_CW_NO_NEED;
539 if (!rv) {
540 rv = is_valid_state(mdev, ns);
541 if (rv == SS_SUCCESS) {
542 rv = is_valid_state_transition(mdev, ns, os);
543 if (rv == SS_SUCCESS)
 544 rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
545 }
546 }
547 spin_unlock_irqrestore(&mdev->req_lock, flags);
548
549 return rv;
550}
551
552/**
553 * drbd_req_state() - Perform an eventually cluster wide state change
554 * @mdev: DRBD device.
555 * @mask: mask of state bits to change.
556 * @val: value of new state bits.
557 * @f: flags
558 *
559 * Should not be called directly, use drbd_request_state() or
560 * _drbd_request_state().
561 */
562static enum drbd_state_rv
563drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
564 union drbd_state val, enum chg_state_flags f)
565{
566 struct completion done;
567 unsigned long flags;
568 union drbd_state os, ns;
 569 enum drbd_state_rv rv;
570
571 init_completion(&done);
572
573 if (f & CS_SERIALIZE)
574 mutex_lock(&mdev->state_mutex);
575
576 spin_lock_irqsave(&mdev->req_lock, flags);
577 os = mdev->state;
578 ns.i = (os.i & ~mask.i) | val.i;
579 ns = sanitize_state(mdev, os, ns, NULL);
580
581 if (cl_wide_st_chg(mdev, os, ns)) {
582 rv = is_valid_state(mdev, ns);
583 if (rv == SS_SUCCESS)
584 rv = is_valid_state_transition(mdev, ns, os);
585 spin_unlock_irqrestore(&mdev->req_lock, flags);
586
587 if (rv < SS_SUCCESS) {
588 if (f & CS_VERBOSE)
589 print_st_err(mdev, os, ns, rv);
590 goto abort;
591 }
592
593 drbd_state_lock(mdev);
594 if (!drbd_send_state_req(mdev, mask, val)) {
595 drbd_state_unlock(mdev);
596 rv = SS_CW_FAILED_BY_PEER;
597 if (f & CS_VERBOSE)
598 print_st_err(mdev, os, ns, rv);
599 goto abort;
600 }
601
602 wait_event(mdev->state_wait,
603 (rv = _req_st_cond(mdev, mask, val)));
604
605 if (rv < SS_SUCCESS) {
606 drbd_state_unlock(mdev);
607 if (f & CS_VERBOSE)
608 print_st_err(mdev, os, ns, rv);
609 goto abort;
610 }
611 spin_lock_irqsave(&mdev->req_lock, flags);
612 os = mdev->state;
613 ns.i = (os.i & ~mask.i) | val.i;
614 rv = _drbd_set_state(mdev, ns, f, &done);
615 drbd_state_unlock(mdev);
616 } else {
617 rv = _drbd_set_state(mdev, ns, f, &done);
618 }
619
620 spin_unlock_irqrestore(&mdev->req_lock, flags);
621
622 if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
623 D_ASSERT(current != mdev->worker.task);
624 wait_for_completion(&done);
625 }
626
627abort:
628 if (f & CS_SERIALIZE)
629 mutex_unlock(&mdev->state_mutex);
630
631 return rv;
632}
633
634/**
635 * _drbd_request_state() - Request a state change (with flags)
636 * @mdev: DRBD device.
637 * @mask: mask of state bits to change.
638 * @val: value of new state bits.
639 * @f: flags
640 *
641 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
642 * flag, or when logging of failed state change requests is not desired.
643 */
644enum drbd_state_rv
645_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
646 union drbd_state val, enum chg_state_flags f)
 647{
 648 enum drbd_state_rv rv;
649
650 wait_event(mdev->state_wait,
651 (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);
652
653 return rv;
654}
655
656static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
657{
658 dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
659 name,
660 drbd_conn_str(ns.conn),
661 drbd_role_str(ns.role),
662 drbd_role_str(ns.peer),
663 drbd_disk_str(ns.disk),
664 drbd_disk_str(ns.pdsk),
 665 is_susp(ns) ? 's' : 'r',
666 ns.aftr_isp ? 'a' : '-',
667 ns.peer_isp ? 'p' : '-',
668 ns.user_isp ? 'u' : '-'
669 );
670}
671
672void print_st_err(struct drbd_conf *mdev, union drbd_state os,
673 union drbd_state ns, enum drbd_state_rv err)
674{
675 if (err == SS_IN_TRANSIENT_STATE)
676 return;
677 dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
678 print_st(mdev, " state", os);
679 print_st(mdev, "wanted", ns);
680}
681
682
683/**
684 * is_valid_state() - Returns an SS_ error code if ns is not valid
685 * @mdev: DRBD device.
686 * @ns: State to consider.
687 */
688static enum drbd_state_rv
689is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
690{
691 /* See drbd_state_sw_errors in drbd_strings.c */
692
693 enum drbd_fencing_p fp;
 694 enum drbd_state_rv rv = SS_SUCCESS;
695
696 fp = FP_DONT_CARE;
697 if (get_ldev(mdev)) {
698 fp = mdev->ldev->dc.fencing;
699 put_ldev(mdev);
700 }
701
702 if (get_net_conf(mdev)) {
703 if (!mdev->net_conf->two_primaries &&
704 ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
705 rv = SS_TWO_PRIMARIES;
706 put_net_conf(mdev);
707 }
708
709 if (rv <= 0)
710 /* already found a reason to abort */;
711 else if (ns.role == R_SECONDARY && mdev->open_cnt)
712 rv = SS_DEVICE_IN_USE;
713
714 else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
715 rv = SS_NO_UP_TO_DATE_DISK;
716
717 else if (fp >= FP_RESOURCE &&
718 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
719 rv = SS_PRIMARY_NOP;
720
721 else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
722 rv = SS_NO_UP_TO_DATE_DISK;
723
724 else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
725 rv = SS_NO_LOCAL_DISK;
726
727 else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
728 rv = SS_NO_REMOTE_DISK;
729
730 else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
731 rv = SS_NO_UP_TO_DATE_DISK;
732
733 else if ((ns.conn == C_CONNECTED ||
734 ns.conn == C_WF_BITMAP_S ||
735 ns.conn == C_SYNC_SOURCE ||
736 ns.conn == C_PAUSED_SYNC_S) &&
737 ns.disk == D_OUTDATED)
738 rv = SS_CONNECTED_OUTDATES;
739
740 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
741 (mdev->sync_conf.verify_alg[0] == 0))
742 rv = SS_NO_VERIFY_ALG;
743
744 else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
745 mdev->agreed_pro_version < 88)
746 rv = SS_NOT_SUPPORTED;
747
748 else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
749 rv = SS_CONNECTED_OUTDATES;
750
751 return rv;
752}
753
754/**
755 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
756 * @mdev: DRBD device.
757 * @ns: new state.
758 * @os: old state.
759 */
760static enum drbd_state_rv
761is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
762 union drbd_state os)
 763{
 764 enum drbd_state_rv rv = SS_SUCCESS;
765
766 if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
767 os.conn > C_CONNECTED)
768 rv = SS_RESYNC_RUNNING;
769
770 if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
771 rv = SS_ALREADY_STANDALONE;
772
773 if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
774 rv = SS_IS_DISKLESS;
775
776 if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
777 rv = SS_NO_NET_CONFIG;
778
779 if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
780 rv = SS_LOWER_THAN_OUTDATED;
781
782 if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
783 rv = SS_IN_TRANSIENT_STATE;
784
785 if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
786 rv = SS_IN_TRANSIENT_STATE;
787
788 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
789 rv = SS_NEED_CONNECTION;
790
791 if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
792 ns.conn != os.conn && os.conn > C_CONNECTED)
793 rv = SS_RESYNC_RUNNING;
794
795 if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
796 os.conn < C_CONNECTED)
797 rv = SS_NEED_CONNECTION;
798
799 if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
800 && os.conn < C_WF_REPORT_PARAMS)
801 rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */
802
803 return rv;
804}
805
806/**
807 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
808 * @mdev: DRBD device.
809 * @os: old state.
810 * @ns: new state.
811 * @warn_sync_abort:
812 *
 813 * When we lose connection, we have to set the state of the peer's disk (pdsk)
814 * to D_UNKNOWN. This rule and many more along those lines are in this function.
815 */
816static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
 817 union drbd_state ns, const char **warn_sync_abort)
818{
819 enum drbd_fencing_p fp;
 820 enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;
821
822 fp = FP_DONT_CARE;
823 if (get_ldev(mdev)) {
824 fp = mdev->ldev->dc.fencing;
825 put_ldev(mdev);
826 }
827
828 /* Disallow Network errors to configure a device's network part */
829 if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
830 os.conn <= C_DISCONNECTING)
831 ns.conn = os.conn;
832
833 /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
834 * If you try to go into some Sync* state, that shall fail (elsewhere). */
 835 if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
 836 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
837 ns.conn = os.conn;
838
839 /* we cannot fail (again) if we already detached */
840 if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
841 ns.disk = D_DISKLESS;
842
843 /* if we are only D_ATTACHING yet,
844 * we can (and should) go directly to D_DISKLESS. */
845 if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
846 ns.disk = D_DISKLESS;
847
848 /* After C_DISCONNECTING only C_STANDALONE may follow */
849 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
850 ns.conn = os.conn;
851
852 if (ns.conn < C_CONNECTED) {
853 ns.peer_isp = 0;
854 ns.peer = R_UNKNOWN;
855 if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
856 ns.pdsk = D_UNKNOWN;
857 }
858
859 /* Clear the aftr_isp when becoming unconfigured */
860 if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
861 ns.aftr_isp = 0;
862
863 /* Abort resync if a disk fails/detaches */
864 if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
865 (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
866 if (warn_sync_abort)
867 *warn_sync_abort =
868 os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
869 "Online-verify" : "Resync";
870 ns.conn = C_CONNECTED;
871 }
872
873 /* Connection breaks down before we finished "Negotiating" */
874 if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
875 get_ldev_if_state(mdev, D_NEGOTIATING)) {
876 if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
877 ns.disk = mdev->new_state_tmp.disk;
878 ns.pdsk = mdev->new_state_tmp.pdsk;
879 } else {
880 dev_alert(DEV, "Connection lost while negotiating, no data!\n");
881 ns.disk = D_DISKLESS;
882 ns.pdsk = D_UNKNOWN;
883 }
884 put_ldev(mdev);
885 }
886
887 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
888 if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
889 if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
890 ns.disk = D_UP_TO_DATE;
891 if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
892 ns.pdsk = D_UP_TO_DATE;
893 }
894
895 /* Implications of the connection stat on the disk states */
896 disk_min = D_DISKLESS;
897 disk_max = D_UP_TO_DATE;
898 pdsk_min = D_INCONSISTENT;
899 pdsk_max = D_UNKNOWN;
900 switch ((enum drbd_conns)ns.conn) {
901 case C_WF_BITMAP_T:
902 case C_PAUSED_SYNC_T:
903 case C_STARTING_SYNC_T:
904 case C_WF_SYNC_UUID:
905 case C_BEHIND:
906 disk_min = D_INCONSISTENT;
907 disk_max = D_OUTDATED;
908 pdsk_min = D_UP_TO_DATE;
909 pdsk_max = D_UP_TO_DATE;
910 break;
911 case C_VERIFY_S:
912 case C_VERIFY_T:
913 disk_min = D_UP_TO_DATE;
914 disk_max = D_UP_TO_DATE;
915 pdsk_min = D_UP_TO_DATE;
916 pdsk_max = D_UP_TO_DATE;
917 break;
918 case C_CONNECTED:
919 disk_min = D_DISKLESS;
920 disk_max = D_UP_TO_DATE;
921 pdsk_min = D_DISKLESS;
922 pdsk_max = D_UP_TO_DATE;
923 break;
924 case C_WF_BITMAP_S:
925 case C_PAUSED_SYNC_S:
926 case C_STARTING_SYNC_S:
927 case C_AHEAD:
928 disk_min = D_UP_TO_DATE;
929 disk_max = D_UP_TO_DATE;
930 pdsk_min = D_INCONSISTENT;
931 pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary*/
932 break;
933 case C_SYNC_TARGET:
934 disk_min = D_INCONSISTENT;
935 disk_max = D_INCONSISTENT;
936 pdsk_min = D_UP_TO_DATE;
937 pdsk_max = D_UP_TO_DATE;
938 break;
939 case C_SYNC_SOURCE:
940 disk_min = D_UP_TO_DATE;
941 disk_max = D_UP_TO_DATE;
942 pdsk_min = D_INCONSISTENT;
943 pdsk_max = D_INCONSISTENT;
944 break;
945 case C_STANDALONE:
946 case C_DISCONNECTING:
947 case C_UNCONNECTED:
948 case C_TIMEOUT:
949 case C_BROKEN_PIPE:
950 case C_NETWORK_FAILURE:
951 case C_PROTOCOL_ERROR:
952 case C_TEAR_DOWN:
953 case C_WF_CONNECTION:
954 case C_WF_REPORT_PARAMS:
955 case C_MASK:
956 break;
957 }
958 if (ns.disk > disk_max)
959 ns.disk = disk_max;
960
961 if (ns.disk < disk_min) {
962 dev_warn(DEV, "Implicitly set disk from %s to %s\n",
963 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
964 ns.disk = disk_min;
965 }
966 if (ns.pdsk > pdsk_max)
967 ns.pdsk = pdsk_max;
968
969 if (ns.pdsk < pdsk_min) {
970 dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
971 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
972 ns.pdsk = pdsk_min;
973 }
974
 975 if (fp == FP_STONITH &&
976 (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
977 !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
 978 ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
979
980 if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
981 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
982 !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
 983 ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
984
985 if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
986 if (ns.conn == C_SYNC_SOURCE)
987 ns.conn = C_PAUSED_SYNC_S;
988 if (ns.conn == C_SYNC_TARGET)
989 ns.conn = C_PAUSED_SYNC_T;
990 } else {
991 if (ns.conn == C_PAUSED_SYNC_S)
992 ns.conn = C_SYNC_SOURCE;
993 if (ns.conn == C_PAUSED_SYNC_T)
994 ns.conn = C_SYNC_TARGET;
995 }
996
997 return ns;
998}
999
1000/* helper for __drbd_set_state */
1001static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
1002{
1003 if (mdev->agreed_pro_version < 90)
1004 mdev->ov_start_sector = 0;
1005 mdev->rs_total = drbd_bm_bits(mdev);
1006 mdev->ov_position = 0;
1007 if (cs == C_VERIFY_T) {
1008 /* starting online verify from an arbitrary position
1009 * does not fit well into the existing protocol.
1010 * on C_VERIFY_T, we initialize ov_left and friends
1011 * implicitly in receive_DataRequest once the
1012 * first P_OV_REQUEST is received */
1013 mdev->ov_start_sector = ~(sector_t)0;
1014 } else {
1015 unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
 1016 if (bit >= mdev->rs_total) {
1017 mdev->ov_start_sector =
1018 BM_BIT_TO_SECT(mdev->rs_total - 1);
1019 mdev->rs_total = 1;
1020 } else
1021 mdev->rs_total -= bit;
1022 mdev->ov_position = mdev->ov_start_sector;
1023 }
 1024 mdev->ov_left = mdev->rs_total;
1025}
1026
1027static void drbd_resume_al(struct drbd_conf *mdev)
1028{
1029 if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
1030 dev_info(DEV, "Resumed AL updates\n");
1031}
1032
1033/**
1034 * __drbd_set_state() - Set a new DRBD state
1035 * @mdev: DRBD device.
1036 * @ns: new state.
1037 * @flags: Flags
1038 * @done: Optional completion, that will get completed after the after_state_ch() finished
1039 *
1040 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
1041 */
1042enum drbd_state_rv
1043__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
1044 enum chg_state_flags flags, struct completion *done)
1045{
1046 union drbd_state os;
 1047 enum drbd_state_rv rv = SS_SUCCESS;
 1048 const char *warn_sync_abort = NULL;
1049 struct after_state_chg_work *ascw;
1050
1051 os = mdev->state;
1052
1053 ns = sanitize_state(mdev, os, ns, &warn_sync_abort);
1054
1055 if (ns.i == os.i)
1056 return SS_NOTHING_TO_DO;
1057
1058 if (!(flags & CS_HARD)) {
1059 /* pre-state-change checks ; only look at ns */
1060 /* See drbd_state_sw_errors in drbd_strings.c */
1061
1062 rv = is_valid_state(mdev, ns);
1063 if (rv < SS_SUCCESS) {
1064 /* If the old state was illegal as well, then let
1065 this happen...*/
1066
 1067 if (is_valid_state(mdev, os) == rv)
 1068 rv = is_valid_state_transition(mdev, ns, os);
1069 } else
1070 rv = is_valid_state_transition(mdev, ns, os);
1071 }
1072
1073 if (rv < SS_SUCCESS) {
1074 if (flags & CS_VERBOSE)
1075 print_st_err(mdev, os, ns, rv);
1076 return rv;
1077 }
1078
1079 if (warn_sync_abort)
 1080 dev_warn(DEV, "%s aborted.\n", warn_sync_abort);
1081
1082 {
1083 char *pbp, pb[300];
1084 pbp = pb;
1085 *pbp = 0;
1086 if (ns.role != os.role)
1087 pbp += sprintf(pbp, "role( %s -> %s ) ",
1088 drbd_role_str(os.role),
1089 drbd_role_str(ns.role));
1090 if (ns.peer != os.peer)
1091 pbp += sprintf(pbp, "peer( %s -> %s ) ",
1092 drbd_role_str(os.peer),
1093 drbd_role_str(ns.peer));
1094 if (ns.conn != os.conn)
1095 pbp += sprintf(pbp, "conn( %s -> %s ) ",
1096 drbd_conn_str(os.conn),
1097 drbd_conn_str(ns.conn));
1098 if (ns.disk != os.disk)
1099 pbp += sprintf(pbp, "disk( %s -> %s ) ",
1100 drbd_disk_str(os.disk),
1101 drbd_disk_str(ns.disk));
1102 if (ns.pdsk != os.pdsk)
1103 pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
1104 drbd_disk_str(os.pdsk),
1105 drbd_disk_str(ns.pdsk));
1106 if (is_susp(ns) != is_susp(os))
1107 pbp += sprintf(pbp, "susp( %d -> %d ) ",
1108 is_susp(os),
1109 is_susp(ns));
1110 if (ns.aftr_isp != os.aftr_isp)
1111 pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
1112 os.aftr_isp,
1113 ns.aftr_isp);
1114 if (ns.peer_isp != os.peer_isp)
1115 pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
1116 os.peer_isp,
1117 ns.peer_isp);
1118 if (ns.user_isp != os.user_isp)
1119 pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
1120 os.user_isp,
1121 ns.user_isp);
1122 dev_info(DEV, "%s\n", pb);
1123 }
1124
1125 /* solve the race between becoming unconfigured,
1126 * worker doing the cleanup, and
1127 * admin reconfiguring us:
1128 * on (re)configure, first set CONFIG_PENDING,
1129 * then wait for a potentially exiting worker,
1130 * start the worker, and schedule one no_op.
1131 * then proceed with configuration.
1132 */
1133 if (ns.disk == D_DISKLESS &&
1134 ns.conn == C_STANDALONE &&
1135 ns.role == R_SECONDARY &&
1136 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
1137 set_bit(DEVICE_DYING, &mdev->flags);
1138
1139 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1140 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1141 * drbd_ldev_destroy() won't happen before our corresponding
1142 * after_state_ch works run, where we put_ldev again. */
1143 if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
1144 (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
1145 atomic_inc(&mdev->local_cnt);
1146
1147 mdev->state = ns;
1148
1149 if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
1150 drbd_print_uuids(mdev, "attached to UUIDs");
1151
1152 wake_up(&mdev->misc_wait);
1153 wake_up(&mdev->state_wait);
1154
1155 /* aborted verify run. log the last position */
1156 if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
1157 ns.conn < C_CONNECTED) {
1158 mdev->ov_start_sector =
 1159 BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
1160 dev_info(DEV, "Online Verify reached sector %llu\n",
1161 (unsigned long long)mdev->ov_start_sector);
1162 }
1163
1164 if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
1165 (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) {
1166 dev_info(DEV, "Syncer continues.\n");
1167 mdev->rs_paused += (long)jiffies
1168 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
1169 if (ns.conn == C_SYNC_TARGET)
1170 mod_timer(&mdev->resync_timer, jiffies);
1171 }
1172
1173 if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) &&
1174 (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
1175 dev_info(DEV, "Resync suspended\n");
 1176 mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
1177 }
1178
1179 if (os.conn == C_CONNECTED &&
1180 (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
1181 unsigned long now = jiffies;
1182 int i;
1183
 1184 set_ov_position(mdev, ns.conn);
 1185 mdev->rs_start = now;
1186 mdev->rs_last_events = 0;
1187 mdev->rs_last_sect_ev = 0;
1188 mdev->ov_last_oos_size = 0;
1189 mdev->ov_last_oos_start = 0;
1190
 1191 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
 1192 mdev->rs_mark_left[i] = mdev->ov_left;
1193 mdev->rs_mark_time[i] = now;
1194 }
1195
1196 drbd_rs_controller_reset(mdev);
1197
1198 if (ns.conn == C_VERIFY_S) {
1199 dev_info(DEV, "Starting Online Verify from sector %llu\n",
1200 (unsigned long long)mdev->ov_position);
1201 mod_timer(&mdev->resync_timer, jiffies);
1202 }
1203 }
1204
1205 if (get_ldev(mdev)) {
1206 u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
1207 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
1208 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
1209
1210 if (test_bit(CRASHED_PRIMARY, &mdev->flags))
1211 mdf |= MDF_CRASHED_PRIMARY;
1212 if (mdev->state.role == R_PRIMARY ||
1213 (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
1214 mdf |= MDF_PRIMARY_IND;
1215 if (mdev->state.conn > C_WF_REPORT_PARAMS)
1216 mdf |= MDF_CONNECTED_IND;
1217 if (mdev->state.disk > D_INCONSISTENT)
1218 mdf |= MDF_CONSISTENT;
1219 if (mdev->state.disk > D_OUTDATED)
1220 mdf |= MDF_WAS_UP_TO_DATE;
1221 if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
1222 mdf |= MDF_PEER_OUT_DATED;
1223 if (mdf != mdev->ldev->md.flags) {
1224 mdev->ldev->md.flags = mdf;
1225 drbd_md_mark_dirty(mdev);
1226 }
1227 if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
1228 drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
1229 put_ldev(mdev);
1230 }
1231
1232 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1233 if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
1234 os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
1235 set_bit(CONSIDER_RESYNC, &mdev->flags);
1236
1237 /* Receiver should clean up itself */
1238 if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
1239 drbd_thread_stop_nowait(&mdev->receiver);
1240
1241 /* Now the receiver finished cleaning up itself, it should die */
1242 if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
1243 drbd_thread_stop_nowait(&mdev->receiver);
1244
1245 /* Upon network failure, we need to restart the receiver. */
1246 if (os.conn > C_TEAR_DOWN &&
1247 ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
1248 drbd_thread_restart_nowait(&mdev->receiver);
1249
1250 /* Resume AL writing if we get a connection */
1251 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1252 drbd_resume_al(mdev);
1253
1254 ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
1255 if (ascw) {
1256 ascw->os = os;
1257 ascw->ns = ns;
1258 ascw->flags = flags;
1259 ascw->w.cb = w_after_state_ch;
1260 ascw->done = done;
1261 drbd_queue_work(&mdev->data.work, &ascw->w);
1262 } else {
1263 dev_warn(DEV, "Could not kmalloc an ascw\n");
1264 }
1265
1266 return rv;
1267}
1268
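/* Worker callback queued by __drbd_set_state(): runs the (possibly sleeping)
 * after_state_ch() actions and completes ascw->done if CS_WAIT_COMPLETE. */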
1269static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1270{
1271 struct after_state_chg_work *ascw =
1272 container_of(w, struct after_state_chg_work, w);
1273 after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
1274 if (ascw->flags & CS_WAIT_COMPLETE) {
1275 D_ASSERT(ascw->done != NULL);
1276 complete(ascw->done);
1277 }
1278 kfree(ascw);
1279
1280 return 1;
1281}
1282
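/* Completion callback for the "set_n_write from StartingSync" bitmap IO:
 * on success continue towards resync (C_WF_SYNC_UUID resp. starting as
 * sync source), on failure fall back to C_CONNECTED. */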
1283static void abw_start_sync(struct drbd_conf *mdev, int rv)
1284{
1285 if (rv) {
1286 dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
1287 _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
1288 return;
1289 }
1290
1291 switch (mdev->state.conn) {
1292 case C_STARTING_SYNC_T:
1293 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
1294 break;
1295 case C_STARTING_SYNC_S:
1296 drbd_start_resync(mdev, C_SYNC_SOURCE);
1297 break;
1298 }
1299}
1300
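/* Run a bitmap IO function from the worker context itself: suspend IO
 * (non-blocking), call io_fn with the bitmap locked, then resume IO. */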
1301int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
1302 int (*io_fn)(struct drbd_conf *),
1303 char *why, enum bm_flag flags)
1304{
1305 int rv;
1306
1307 D_ASSERT(current == mdev->worker.task);
1308
1309 /* open coded non-blocking drbd_suspend_io(mdev); */
1310 set_bit(SUSPEND_IO, &mdev->flags);
 1311
 1312 drbd_bm_lock(mdev, why, flags);
1313 rv = io_fn(mdev);
1314 drbd_bm_unlock(mdev);
1315
1316 drbd_resume_io(mdev);
1317
1318 return rv;
1319}
1320
1321/**
1322 * after_state_ch() - Perform after state change actions that may sleep
1323 * @mdev: DRBD device.
1324 * @os: old state.
1325 * @ns: new state.
1326 * @flags: Flags
1327 */
1328static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
1329 union drbd_state ns, enum chg_state_flags flags)
1330{
1331 enum drbd_fencing_p fp;
 1332 enum drbd_req_event what = nothing;
 1333 union drbd_state nsm = (union drbd_state){ .i = -1 };
1334
1335 if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
1336 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1337 if (mdev->p_uuid)
1338 mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
1339 }
1340
1341 fp = FP_DONT_CARE;
1342 if (get_ldev(mdev)) {
1343 fp = mdev->ldev->dc.fencing;
1344 put_ldev(mdev);
1345 }
1346
1347 /* Inform userspace about the change... */
1348 drbd_bcast_state(mdev, ns);
1349
1350 if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
1351 (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
1352 drbd_khelper(mdev, "pri-on-incon-degr");
1353
1354 /* Here we have the actions that are performed after a
1355 state change. This function might sleep */
1356
1357 nsm.i = -1;
1358 if (ns.susp_nod) {
1359 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
1360 what = resend;
 1361
 1362 if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
 1363 what = restart_frozen_disk_io;
 1364
1365 if (what != nothing)
1366 nsm.susp_nod = 0;
1367 }
1368
 1369 if (ns.susp_fen) {
1370 /* case1: The outdate peer handler is successful: */
1371 if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
 1372 tl_clear(mdev);
1373 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
1374 drbd_uuid_new_current(mdev);
1375 clear_bit(NEW_CUR_UUID, &mdev->flags);
 1376 }
 1377 spin_lock_irq(&mdev->req_lock);
 1378 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
1379 spin_unlock_irq(&mdev->req_lock);
1380 }
1381 /* case2: The connection was established again: */
1382 if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
1383 clear_bit(NEW_CUR_UUID, &mdev->flags);
 1384 what = resend;
 1385 nsm.susp_fen = 0;
 1386 }
 1387 }
1388
1389 if (what != nothing) {
1390 spin_lock_irq(&mdev->req_lock);
1391 _tl_restart(mdev, what);
1392 nsm.i &= mdev->state.i;
1393 _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
 1394 spin_unlock_irq(&mdev->req_lock);
 1395 }
 1396
1397 /* Became sync source. With protocol >= 96, we still need to send out
1398 * the sync uuid now. Need to do that before any drbd_send_state, or
1399 * the other side may go "paused sync" before receiving the sync uuids,
1400 * which is unexpected. */
1401 if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
1402 (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
1403 mdev->agreed_pro_version >= 96 && get_ldev(mdev)) {
1404 drbd_gen_and_send_sync_uuid(mdev);
1405 put_ldev(mdev);
1406 }
1407
1408 /* Do not change the order of the if above and the two below... */
1409 if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
1410 drbd_send_uuids(mdev);
1411 drbd_send_state(mdev);
1412 }
1413 /* No point in queuing send_bitmap if we don't have a connection
1414 * anymore, so check also the _current_ state, not only the new state
1415 * at the time this work was queued. */
1416 if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
1417 mdev->state.conn == C_WF_BITMAP_S)
1418 drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
1419 "send_bitmap (WFBitMapS)",
1420 BM_LOCKED_TEST_ALLOWED);
1421
1422 /* Lost contact to peer's copy of the data */
1423 if ((os.pdsk >= D_INCONSISTENT &&
1424 os.pdsk != D_UNKNOWN &&
1425 os.pdsk != D_OUTDATED)
1426 && (ns.pdsk < D_INCONSISTENT ||
1427 ns.pdsk == D_UNKNOWN ||
1428 ns.pdsk == D_OUTDATED)) {
1429 if (get_ldev(mdev)) {
1430 if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
 1431 mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
 1432 if (is_susp(mdev->state)) {
1433 set_bit(NEW_CUR_UUID, &mdev->flags);
1434 } else {
1435 drbd_uuid_new_current(mdev);
1436 drbd_send_uuids(mdev);
1437 }
 1438 }
1439 put_ldev(mdev);
1440 }
1441 }
1442
1443 if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
 1444 if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
 1445 drbd_uuid_new_current(mdev);
1446 drbd_send_uuids(mdev);
1447 }
1448
1449 /* D_DISKLESS Peer becomes secondary */
1450 if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
1451 /* We may still be Primary ourselves.
1452 * No harm done if the bitmap still changes,
1453 * redirtied pages will follow later. */
1454 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1455 "demote diskless peer", BM_LOCKED_SET_ALLOWED);
1456 put_ldev(mdev);
1457 }
1458
1459 /* Write out all changed bits on demote.
 1460 * Though, no need to do that just yet
1461 * if there is a resync going on still */
1462 if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
1463 mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
1464 /* No changes to the bitmap expected this time, so assert that,
1465 * even though no harm was done if it did change. */
1466 drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
1467 "demote", BM_LOCKED_TEST_ALLOWED);
1468 put_ldev(mdev);
1469 }
1470
1471 /* Last part of the attaching process ... */
1472 if (ns.conn >= C_CONNECTED &&
1473 os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
 1474 drbd_send_sizes(mdev, 0, 0); /* to start sync... */
1475 drbd_send_uuids(mdev);
1476 drbd_send_state(mdev);
1477 }
1478
1479 /* We want to pause/continue resync, tell peer. */
1480 if (ns.conn >= C_CONNECTED &&
1481 ((os.aftr_isp != ns.aftr_isp) ||
1482 (os.user_isp != ns.user_isp)))
1483 drbd_send_state(mdev);
1484
1485 /* In case one of the isp bits got set, suspend other devices. */
1486 if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
1487 (ns.aftr_isp || ns.peer_isp || ns.user_isp))
1488 suspend_other_sg(mdev);
1489
1490 /* Make sure the peer gets informed about eventual state
1491 changes (ISP bits) while we were in WFReportParams. */
1492 if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
1493 drbd_send_state(mdev);
1494
1495 if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
1496 drbd_send_state(mdev);
1497
1498 /* We are in the progress to start a full sync... */
1499 if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
1500 (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
1501 /* no other bitmap changes expected during this phase */
1502 drbd_queue_bitmap_io(mdev,
1503 &drbd_bmio_set_n_write, &abw_start_sync,
1504 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
1505
1506 /* We are invalidating our self... */
1507 if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
1508 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
1509 /* other bitmap operation expected during this phase */
1510 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
1511 "set_n_write from invalidate", BM_LOCKED_MASK);
 1512
1513 /* first half of local IO error, failure to attach,
1514 * or administrative detach */
1515 if (os.disk != D_FAILED && ns.disk == D_FAILED) {
1516 enum drbd_io_error_p eh;
1517 int was_io_error;
1518 /* corresponding get_ldev was in __drbd_set_state, to serialize
1519 * our cleanup here with the transition to D_DISKLESS,
1520 * so it is safe to dreference ldev here. */
1521 eh = mdev->ldev->dc.on_io_error;
1522 was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
1523
1524 /* current state still has to be D_FAILED,
1525 * there is only one way out: to D_DISKLESS,
1526 * and that may only happen after our put_ldev below. */
1527 if (mdev->state.disk != D_FAILED)
1528 dev_err(DEV,
1529 "ASSERT FAILED: disk is %s during detach\n",
1530 drbd_disk_str(mdev->state.disk));
1531
1532 if (drbd_send_state(mdev))
 1533 dev_warn(DEV, "Notified peer that I am detaching my disk\n");
 1534 else
 1535 dev_err(DEV, "Sending state for detaching disk failed\n");
1536
1537 drbd_rs_cancel_all(mdev);
 1538
1539 /* In case we want to get something to stable storage still,
1540 * this may be the last chance.
1541 * Following put_ldev may transition to D_DISKLESS. */
1542 drbd_md_sync(mdev);
1543 put_ldev(mdev);
1544
1545 if (was_io_error && eh == EP_CALL_HELPER)
1546 drbd_khelper(mdev, "local-io-error");
1547 }
 1548
1549 /* second half of local IO error, failure to attach,
1550 * or administrative detach,
1551 * after local_cnt references have reached zero again */
1552 if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
1553 /* We must still be diskless,
1554 * re-attach has to be serialized with this! */
1555 if (mdev->state.disk != D_DISKLESS)
1556 dev_err(DEV,
1557 "ASSERT FAILED: disk is %s while going diskless\n",
1558 drbd_disk_str(mdev->state.disk));
 1559
1560 mdev->rs_total = 0;
1561 mdev->rs_failed = 0;
1562 atomic_set(&mdev->rs_pending_cnt, 0);
 1563
 1564 if (drbd_send_state(mdev))
 1565 dev_warn(DEV, "Notified peer that I'm now diskless.\n");
 1566 /* corresponding get_ldev in __drbd_set_state
 1567 * this may finally trigger drbd_ldev_destroy. */
 1568 put_ldev(mdev);
1569 }
1570
 1571 /* Notify peer that I had a local IO error, and did not detach. */
1572 if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
1573 drbd_send_state(mdev);
1574
1575 /* Disks got bigger while they were detached */
1576 if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
1577 test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
1578 if (ns.conn == C_CONNECTED)
1579 resync_after_online_grow(mdev);
1580 }
1581
1582 /* A resync finished or aborted, wake paused devices... */
1583 if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
1584 (os.peer_isp && !ns.peer_isp) ||
1585 (os.user_isp && !ns.user_isp))
1586 resume_next_sg(mdev);
1587
1588 /* sync target done with resync. Explicitly notify peer, even though
1589 * it should (at least for non-empty resyncs) already know itself. */
1590 if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
1591 drbd_send_state(mdev);
1592
1593 /* This triggers bitmap writeout of potentially still unwritten pages
1594 * if the resync finished cleanly, or aborted because of peer disk
 1595 * failure, or because of connection loss.
1596 * For resync aborted because of local disk failure, we cannot do
1597 * any bitmap writeout anymore.
 1598 * No harm done if some bits change during this phase.
 1599 */
1600 if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
1601 drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
1602 "write from resync_finished", BM_LOCKED_SET_ALLOWED);
1603 put_ldev(mdev);
1604 }
 1605
 1606 /* free tl_hash if we got thawed and are C_STANDALONE */
 1607 if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
1608 drbd_free_tl_hash(mdev);
1609
1610 /* Upon network connection, we need to start the receiver */
1611 if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
1612 drbd_thread_start(&mdev->receiver);
1613
1614 /* Terminate worker thread if we are unconfigured - it will be
1615 restarted as needed... */
1616 if (ns.disk == D_DISKLESS &&
1617 ns.conn == C_STANDALONE &&
1618 ns.role == R_SECONDARY) {
1619 if (os.aftr_isp != ns.aftr_isp)
1620 resume_next_sg(mdev);
1621 /* set in __drbd_set_state, unless CONFIG_PENDING was set */
1622 if (test_bit(DEVICE_DYING, &mdev->flags))
1623 drbd_thread_stop_nowait(&mdev->worker);
1624 }
1625
1626 drbd_md_sync(mdev);
1627}
1628
1629
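/* Common kthread entry point for the receiver, worker and asender threads:
 * runs thi->function and loops as long as t_state is set to Restarting. */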
1630static int drbd_thread_setup(void *arg)
1631{
1632 struct drbd_thread *thi = (struct drbd_thread *) arg;
1633 struct drbd_conf *mdev = thi->mdev;
1634 unsigned long flags;
1635 int retval;
1636
1637restart:
1638 retval = thi->function(thi);
1639
1640 spin_lock_irqsave(&thi->t_lock, flags);
1641
1642 /* if the receiver has been "Exiting", the last thing it did
1643 * was set the conn state to "StandAlone",
1644 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
1645 * and receiver thread will be "started".
1646 * drbd_thread_start needs to set "Restarting" in that case.
1647 * t_state check and assignment needs to be within the same spinlock,
1648 * so either thread_start sees Exiting, and can remap to Restarting,
 1649	 * or thread_start sees None, and can proceed as normal.
1650 */
1651
1652 if (thi->t_state == Restarting) {
1653 dev_info(DEV, "Restarting %s\n", current->comm);
1654 thi->t_state = Running;
1655 spin_unlock_irqrestore(&thi->t_lock, flags);
1656 goto restart;
1657 }
1658
1659 thi->task = NULL;
1660 thi->t_state = None;
1661 smp_mb();
1662 complete(&thi->stop);
1663 spin_unlock_irqrestore(&thi->t_lock, flags);
1664
1665 dev_info(DEV, "Terminating %s\n", current->comm);
1666
1667 /* Release mod reference taken when thread was started */
1668 module_put(THIS_MODULE);
1669 return retval;
1670}
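/*
 * Editor's sketch (not DRBD code): the Exiting -> Restarting handshake
 * described in the comment above, reduced to a single check-and-remap under
 * one lock.  It is plain userspace C with pthreads; the names demo_thread,
 * try_restart and should_loop_again are invented for this illustration and
 * only mirror the t_state logic, nothing else.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum demo_thread_state { T_NONE, T_RUNNING, T_EXITING, T_RESTARTING };

struct demo_thread {
	pthread_mutex_t lock;
	enum demo_thread_state state;
};

/* caller side: a (re)start request must observe Exiting and remap it */
static bool try_restart(struct demo_thread *t)
{
	bool restarted = false;

	pthread_mutex_lock(&t->lock);
	if (t->state == T_EXITING) {
		t->state = T_RESTARTING;	/* thread loop will pick this up */
		restarted = true;
	}
	pthread_mutex_unlock(&t->lock);
	return restarted;
}

/* thread side: decide between terminating and looping again */
static bool should_loop_again(struct demo_thread *t)
{
	bool again;

	pthread_mutex_lock(&t->lock);
	again = (t->state == T_RESTARTING);
	t->state = again ? T_RUNNING : T_NONE;
	pthread_mutex_unlock(&t->lock);
	return again;
}

int main(void)
{
	struct demo_thread t = { PTHREAD_MUTEX_INITIALIZER, T_EXITING };

	printf("restart requested: %d\n", try_restart(&t));	/* prints 1 */
	printf("loop again: %d\n", should_loop_again(&t));	/* prints 1 */
	return 0;
}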
1671
1672static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1673 int (*func) (struct drbd_thread *))
1674{
1675 spin_lock_init(&thi->t_lock);
1676 thi->task = NULL;
1677 thi->t_state = None;
1678 thi->function = func;
1679 thi->mdev = mdev;
1680}
1681
1682int drbd_thread_start(struct drbd_thread *thi)
1683{
1684 struct drbd_conf *mdev = thi->mdev;
1685 struct task_struct *nt;
1686 unsigned long flags;
1687
1688 const char *me =
1689 thi == &mdev->receiver ? "receiver" :
1690 thi == &mdev->asender ? "asender" :
1691 thi == &mdev->worker ? "worker" : "NONSENSE";
1692
1693 /* is used from state engine doing drbd_thread_stop_nowait,
1694 * while holding the req lock irqsave */
1695 spin_lock_irqsave(&thi->t_lock, flags);
1696
1697 switch (thi->t_state) {
1698 case None:
1699 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1700 me, current->comm, current->pid);
1701
1702 /* Get ref on module for thread - this is released when thread exits */
1703 if (!try_module_get(THIS_MODULE)) {
1704 dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1705 spin_unlock_irqrestore(&thi->t_lock, flags);
81e84650 1706 return false;
b411b363
PR
1707 }
1708
1709 init_completion(&thi->stop);
1710 D_ASSERT(thi->task == NULL);
1711 thi->reset_cpu_mask = 1;
1712 thi->t_state = Running;
1713 spin_unlock_irqrestore(&thi->t_lock, flags);
1714 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
1715
1716 nt = kthread_create(drbd_thread_setup, (void *) thi,
1717 "drbd%d_%s", mdev_to_minor(mdev), me);
1718
1719 if (IS_ERR(nt)) {
1720 dev_err(DEV, "Couldn't start thread\n");
1721
1722 module_put(THIS_MODULE);
81e84650 1723 return false;
b411b363
PR
1724 }
1725 spin_lock_irqsave(&thi->t_lock, flags);
1726 thi->task = nt;
1727 thi->t_state = Running;
1728 spin_unlock_irqrestore(&thi->t_lock, flags);
1729 wake_up_process(nt);
1730 break;
1731 case Exiting:
1732 thi->t_state = Restarting;
1733 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1734 me, current->comm, current->pid);
1735 /* fall through */
1736 case Running:
1737 case Restarting:
1738 default:
1739 spin_unlock_irqrestore(&thi->t_lock, flags);
1740 break;
1741 }
1742
81e84650 1743 return true;
b411b363
PR
1744}
1745
1746
1747void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1748{
1749 unsigned long flags;
1750
1751 enum drbd_thread_state ns = restart ? Restarting : Exiting;
1752
1753 /* may be called from state engine, holding the req lock irqsave */
1754 spin_lock_irqsave(&thi->t_lock, flags);
1755
1756 if (thi->t_state == None) {
1757 spin_unlock_irqrestore(&thi->t_lock, flags);
1758 if (restart)
1759 drbd_thread_start(thi);
1760 return;
1761 }
1762
1763 if (thi->t_state != ns) {
1764 if (thi->task == NULL) {
1765 spin_unlock_irqrestore(&thi->t_lock, flags);
1766 return;
1767 }
1768
1769 thi->t_state = ns;
1770 smp_mb();
1771 init_completion(&thi->stop);
1772 if (thi->task != current)
1773 force_sig(DRBD_SIGKILL, thi->task);
1774
1775 }
1776
1777 spin_unlock_irqrestore(&thi->t_lock, flags);
1778
1779 if (wait)
1780 wait_for_completion(&thi->stop);
1781}
1782
1783#ifdef CONFIG_SMP
1784/**
1785 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
1786 * @mdev: DRBD device.
1787 *
1788 * Forces all threads of a device onto the same CPU. This is beneficial for
1789 * DRBD's performance. May be overwritten by user's configuration.
1790 */
1791void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1792{
1793 int ord, cpu;
1794
1795 /* user override. */
1796 if (cpumask_weight(mdev->cpu_mask))
1797 return;
1798
1799 ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1800 for_each_online_cpu(cpu) {
1801 if (ord-- == 0) {
1802 cpumask_set_cpu(cpu, mdev->cpu_mask);
1803 return;
1804 }
1805 }
1806 /* should not be reached */
1807 cpumask_setall(mdev->cpu_mask);
1808}
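/*
 * Editor's sketch (not kernel code): the selection rule used above is just
 * "minor number modulo the count of online CPUs".  Here sysconf() stands in
 * for cpu_online_mask and pick_cpu_for_minor() is an invented name; this is
 * an illustration of the policy, not of the cpumask API.
 */
#include <stdio.h>
#include <unistd.h>

static int pick_cpu_for_minor(unsigned int minor)
{
	long online = sysconf(_SC_NPROCESSORS_ONLN);

	if (online < 1)
		online = 1;	/* be defensive if sysconf() fails */
	return (int)(minor % (unsigned long)online);
}

int main(void)
{
	unsigned int minor;

	for (minor = 0; minor < 4; minor++)
		printf("drbd%u -> CPU %d\n", minor, pick_cpu_for_minor(minor));
	return 0;
}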
1809
1810/**
1811 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
1812 * @mdev: DRBD device.
1813 *
1814 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
1815 * prematurely.
1816 */
1817void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1818{
1819 struct task_struct *p = current;
1820 struct drbd_thread *thi =
1821 p == mdev->asender.task ? &mdev->asender :
1822 p == mdev->receiver.task ? &mdev->receiver :
1823 p == mdev->worker.task ? &mdev->worker :
1824 NULL;
1825 ERR_IF(thi == NULL)
1826 return;
1827 if (!thi->reset_cpu_mask)
1828 return;
1829 thi->reset_cpu_mask = 0;
1830 set_cpus_allowed_ptr(p, mdev->cpu_mask);
1831}
1832#endif
1833
1834/* the appropriate socket mutex must be held already */
1835int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
0b70a13d 1836 enum drbd_packets cmd, struct p_header80 *h,
b411b363
PR
1837 size_t size, unsigned msg_flags)
1838{
1839 int sent, ok;
1840
81e84650
AG
1841 ERR_IF(!h) return false;
1842 ERR_IF(!size) return false;
b411b363
PR
1843
1844 h->magic = BE_DRBD_MAGIC;
1845 h->command = cpu_to_be16(cmd);
0b70a13d 1846 h->length = cpu_to_be16(size-sizeof(struct p_header80));
b411b363 1847
b411b363
PR
1848 sent = drbd_send(mdev, sock, h, size, msg_flags);
1849
1850 ok = (sent == size);
0ddc5549
LE
1851 if (!ok && !signal_pending(current))
1852 dev_warn(DEV, "short sent %s size=%d sent=%d\n",
b411b363
PR
1853 cmdname(cmd), (int)size, sent);
1854 return ok;
1855}
1856
1857/* don't pass the socket. we may only look at it
1858 * when we hold the appropriate socket mutex.
1859 */
1860int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
0b70a13d 1861 enum drbd_packets cmd, struct p_header80 *h, size_t size)
b411b363
PR
1862{
1863 int ok = 0;
1864 struct socket *sock;
1865
1866 if (use_data_socket) {
1867 mutex_lock(&mdev->data.mutex);
1868 sock = mdev->data.socket;
1869 } else {
1870 mutex_lock(&mdev->meta.mutex);
1871 sock = mdev->meta.socket;
1872 }
1873
1874 /* drbd_disconnect() could have called drbd_free_sock()
1875 * while we were waiting in down()... */
1876 if (likely(sock != NULL))
1877 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1878
1879 if (use_data_socket)
1880 mutex_unlock(&mdev->data.mutex);
1881 else
1882 mutex_unlock(&mdev->meta.mutex);
1883 return ok;
1884}
1885
1886int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1887 size_t size)
1888{
0b70a13d 1889 struct p_header80 h;
b411b363
PR
1890 int ok;
1891
1892 h.magic = BE_DRBD_MAGIC;
1893 h.command = cpu_to_be16(cmd);
1894 h.length = cpu_to_be16(size);
1895
1896 if (!drbd_get_data_sock(mdev))
1897 return 0;
1898
b411b363
PR
1899 ok = (sizeof(h) ==
1900 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1901 ok = ok && (size ==
1902 drbd_send(mdev, mdev->data.socket, data, size, 0));
1903
1904 drbd_put_data_sock(mdev);
1905
1906 return ok;
1907}
1908
1909int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1910{
8e26f9cc 1911 struct p_rs_param_95 *p;
b411b363
PR
1912 struct socket *sock;
1913 int size, rv;
1914 const int apv = mdev->agreed_pro_version;
1915
1916 size = apv <= 87 ? sizeof(struct p_rs_param)
1917 : apv == 88 ? sizeof(struct p_rs_param)
1918 + strlen(mdev->sync_conf.verify_alg) + 1
8e26f9cc
PR
1919 : apv <= 94 ? sizeof(struct p_rs_param_89)
1920 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
b411b363
PR
1921
1922 /* used from admin command context and receiver/worker context.
1923 * to avoid kmalloc, grab the socket right here,
1924 * then use the pre-allocated sbuf there */
1925 mutex_lock(&mdev->data.mutex);
1926 sock = mdev->data.socket;
1927
1928 if (likely(sock != NULL)) {
1929 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1930
8e26f9cc 1931 p = &mdev->data.sbuf.rs_param_95;
b411b363
PR
1932
1933 /* initialize verify_alg and csums_alg */
1934 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1935
1936 p->rate = cpu_to_be32(sc->rate);
8e26f9cc
PR
1937 p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
1938 p->c_delay_target = cpu_to_be32(sc->c_delay_target);
1939 p->c_fill_target = cpu_to_be32(sc->c_fill_target);
1940 p->c_max_rate = cpu_to_be32(sc->c_max_rate);
b411b363
PR
1941
1942 if (apv >= 88)
1943 strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1944 if (apv >= 89)
1945 strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1946
1947 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1948 } else
1949 rv = 0; /* not ok */
1950
1951 mutex_unlock(&mdev->data.mutex);
1952
1953 return rv;
1954}
1955
1956int drbd_send_protocol(struct drbd_conf *mdev)
1957{
1958 struct p_protocol *p;
cf14c2e9 1959 int size, cf, rv;
b411b363
PR
1960
1961 size = sizeof(struct p_protocol);
1962
1963 if (mdev->agreed_pro_version >= 87)
1964 size += strlen(mdev->net_conf->integrity_alg) + 1;
1965
1966 /* we must not recurse into our own queue,
1967 * as that is blocked during handshake */
1968 p = kmalloc(size, GFP_NOIO);
1969 if (p == NULL)
1970 return 0;
1971
1972 p->protocol = cpu_to_be32(mdev->net_conf->wire_protocol);
1973 p->after_sb_0p = cpu_to_be32(mdev->net_conf->after_sb_0p);
1974 p->after_sb_1p = cpu_to_be32(mdev->net_conf->after_sb_1p);
1975 p->after_sb_2p = cpu_to_be32(mdev->net_conf->after_sb_2p);
b411b363
PR
1976 p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1977
cf14c2e9
PR
1978 cf = 0;
1979 if (mdev->net_conf->want_lose)
1980 cf |= CF_WANT_LOSE;
1981 if (mdev->net_conf->dry_run) {
1982 if (mdev->agreed_pro_version >= 92)
1983 cf |= CF_DRY_RUN;
1984 else {
1985 dev_err(DEV, "--dry-run is not supported by peer");
7ac314c8 1986 kfree(p);
148efa16 1987 return -1;
cf14c2e9
PR
1988 }
1989 }
1990 p->conn_flags = cpu_to_be32(cf);
1991
b411b363
PR
1992 if (mdev->agreed_pro_version >= 87)
1993 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1994
1995 rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
0b70a13d 1996 (struct p_header80 *)p, size);
b411b363
PR
1997 kfree(p);
1998 return rv;
1999}
2000
2001int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
2002{
2003 struct p_uuids p;
2004 int i;
2005
2006 if (!get_ldev_if_state(mdev, D_NEGOTIATING))
2007 return 1;
2008
2009 for (i = UI_CURRENT; i < UI_SIZE; i++)
2010 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
2011
2012 mdev->comm_bm_set = drbd_bm_total_weight(mdev);
2013 p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
2014 uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
2015 uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
2016 uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
2017 p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
2018
2019 put_ldev(mdev);
2020
2021 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
0b70a13d 2022 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2023}
2024
2025int drbd_send_uuids(struct drbd_conf *mdev)
2026{
2027 return _drbd_send_uuids(mdev, 0);
2028}
2029
2030int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
2031{
2032 return _drbd_send_uuids(mdev, 8);
2033}
2034
62b0da3a
LE
2035void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
2036{
2037 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2038 u64 *uuid = mdev->ldev->md.uuid;
2039 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
2040 text,
2041 (unsigned long long)uuid[UI_CURRENT],
2042 (unsigned long long)uuid[UI_BITMAP],
2043 (unsigned long long)uuid[UI_HISTORY_START],
2044 (unsigned long long)uuid[UI_HISTORY_END]);
2045 put_ldev(mdev);
2046 } else {
2047 dev_info(DEV, "%s effective data uuid: %016llX\n",
2048 text,
2049 (unsigned long long)mdev->ed_uuid);
2050 }
2051}
2052
5a22db89 2053int drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
b411b363
PR
2054{
2055 struct p_rs_uuid p;
5a22db89
LE
2056 u64 uuid;
2057
2058 D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
b411b363 2059
4a23f264 2060 uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
5a22db89 2061 drbd_uuid_set(mdev, UI_BITMAP, uuid);
62b0da3a 2062 drbd_print_uuids(mdev, "updated sync UUID");
5a22db89
LE
2063 drbd_md_sync(mdev);
2064 p.uuid = cpu_to_be64(uuid);
b411b363
PR
2065
2066 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
0b70a13d 2067 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2068}
2069
e89b591c 2070int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
b411b363
PR
2071{
2072 struct p_sizes p;
2073 sector_t d_size, u_size;
2074 int q_order_type;
2075 int ok;
2076
2077 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
2078 D_ASSERT(mdev->ldev->backing_bdev);
2079 d_size = drbd_get_max_capacity(mdev->ldev);
2080 u_size = mdev->ldev->dc.disk_size;
2081 q_order_type = drbd_queue_order_type(mdev);
b411b363
PR
2082 put_ldev(mdev);
2083 } else {
2084 d_size = 0;
2085 u_size = 0;
2086 q_order_type = QUEUE_ORDERED_NONE;
2087 }
2088
2089 p.d_size = cpu_to_be64(d_size);
2090 p.u_size = cpu_to_be64(u_size);
2091 p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
1816a2b4 2092 p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9);
e89b591c
PR
2093 p.queue_order_type = cpu_to_be16(q_order_type);
2094 p.dds_flags = cpu_to_be16(flags);
b411b363
PR
2095
2096 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
0b70a13d 2097 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2098 return ok;
2099}
2100
2101/**
2102 * drbd_send_state() - Sends the drbd state to the peer
2103 * @mdev: DRBD device.
2104 */
2105int drbd_send_state(struct drbd_conf *mdev)
2106{
2107 struct socket *sock;
2108 struct p_state p;
2109 int ok = 0;
2110
 2111	/* Grab state lock so we won't send state if we're in the middle
2112 * of a cluster wide state change on another thread */
2113 drbd_state_lock(mdev);
2114
2115 mutex_lock(&mdev->data.mutex);
2116
2117 p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
2118 sock = mdev->data.socket;
2119
2120 if (likely(sock != NULL)) {
2121 ok = _drbd_send_cmd(mdev, sock, P_STATE,
0b70a13d 2122 (struct p_header80 *)&p, sizeof(p), 0);
b411b363
PR
2123 }
2124
2125 mutex_unlock(&mdev->data.mutex);
2126
2127 drbd_state_unlock(mdev);
2128 return ok;
2129}
2130
2131int drbd_send_state_req(struct drbd_conf *mdev,
2132 union drbd_state mask, union drbd_state val)
2133{
2134 struct p_req_state p;
2135
2136 p.mask = cpu_to_be32(mask.i);
2137 p.val = cpu_to_be32(val.i);
2138
2139 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
0b70a13d 2140 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2141}
2142
bf885f8a 2143int drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
b411b363
PR
2144{
2145 struct p_req_state_reply p;
2146
2147 p.retcode = cpu_to_be32(retcode);
2148
2149 return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
0b70a13d 2150 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2151}
2152
2153int fill_bitmap_rle_bits(struct drbd_conf *mdev,
2154 struct p_compressed_bm *p,
2155 struct bm_xfer_ctx *c)
2156{
2157 struct bitstream bs;
2158 unsigned long plain_bits;
2159 unsigned long tmp;
2160 unsigned long rl;
2161 unsigned len;
2162 unsigned toggle;
2163 int bits;
2164
2165 /* may we use this feature? */
2166 if ((mdev->sync_conf.use_rle == 0) ||
2167 (mdev->agreed_pro_version < 90))
2168 return 0;
2169
2170 if (c->bit_offset >= c->bm_bits)
2171 return 0; /* nothing to do. */
2172
 2173	/* use at most this many bytes */
2174 bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
2175 memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
2176 /* plain bits covered in this code string */
2177 plain_bits = 0;
2178
2179 /* p->encoding & 0x80 stores whether the first run length is set.
2180 * bit offset is implicit.
2181 * start with toggle == 2 to be able to tell the first iteration */
2182 toggle = 2;
2183
 2184	/* see how many plain bits we can stuff into one packet
2185 * using RLE and VLI. */
2186 do {
2187 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
2188 : _drbd_bm_find_next(mdev, c->bit_offset);
2189 if (tmp == -1UL)
2190 tmp = c->bm_bits;
2191 rl = tmp - c->bit_offset;
2192
2193 if (toggle == 2) { /* first iteration */
2194 if (rl == 0) {
2195 /* the first checked bit was set,
2196 * store start value, */
2197 DCBP_set_start(p, 1);
2198 /* but skip encoding of zero run length */
2199 toggle = !toggle;
2200 continue;
2201 }
2202 DCBP_set_start(p, 0);
2203 }
2204
2205 /* paranoia: catch zero runlength.
2206 * can only happen if bitmap is modified while we scan it. */
2207 if (rl == 0) {
2208 dev_err(DEV, "unexpected zero runlength while encoding bitmap "
2209 "t:%u bo:%lu\n", toggle, c->bit_offset);
2210 return -1;
2211 }
2212
2213 bits = vli_encode_bits(&bs, rl);
2214 if (bits == -ENOBUFS) /* buffer full */
2215 break;
2216 if (bits <= 0) {
2217 dev_err(DEV, "error while encoding bitmap: %d\n", bits);
2218 return 0;
2219 }
2220
2221 toggle = !toggle;
2222 plain_bits += rl;
2223 c->bit_offset = tmp;
2224 } while (c->bit_offset < c->bm_bits);
2225
2226 len = bs.cur.b - p->code + !!bs.cur.bit;
2227
2228 if (plain_bits < (len << 3)) {
2229 /* incompressible with this method.
2230 * we need to rewind both word and bit position. */
2231 c->bit_offset -= plain_bits;
2232 bm_xfer_ctx_bit_to_word_offset(c);
2233 c->bit_offset = c->word_offset * BITS_PER_LONG;
2234 return 0;
2235 }
2236
2237 /* RLE + VLI was able to compress it just fine.
2238 * update c->word_offset. */
2239 bm_xfer_ctx_bit_to_word_offset(c);
2240
2241 /* store pad_bits */
2242 DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
2243
2244 return len;
2245}
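/*
 * Editor's sketch (userspace, not DRBD code) of the run-length walk above,
 * with the VLI encoder replaced by a rough varint byte estimate: alternate
 * runs of clear/set bits are summed up as "plain bits", and encoding only
 * pays off when plain_bits >= len * 8, mirroring the incompressibility
 * check.  count_bytes_for_run() is an invented stand-in for vli_encode_bits().
 */
#include <stdio.h>

static unsigned count_bytes_for_run(unsigned long rl)
{
	unsigned bytes = 1;

	while (rl >>= 7)	/* assume roughly 7 payload bits per code byte */
		bytes++;
	return bytes;
}

int main(void)
{
	/* example bitmap as alternating run lengths: 100 clear, 3 set, ... */
	unsigned long runs[] = { 100, 3, 2000, 1, 500000, 2 };
	unsigned long plain_bits = 0;
	unsigned len = 0;
	unsigned i;

	for (i = 0; i < sizeof(runs) / sizeof(runs[0]); i++) {
		plain_bits += runs[i];
		len += count_bytes_for_run(runs[i]);
	}
	printf("%lu plain bits in %u code bytes: %s\n", plain_bits, len,
	       plain_bits < (unsigned long)len * 8 ? "incompressible"
						   : "worth compressing");
	return 0;
}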
2246
f70af118
AG
2247/**
2248 * send_bitmap_rle_or_plain
2249 *
2250 * Return 0 when done, 1 when another iteration is needed, and a negative error
2251 * code upon failure.
2252 */
2253static int
b411b363 2254send_bitmap_rle_or_plain(struct drbd_conf *mdev,
f70af118 2255 struct p_header80 *h, struct bm_xfer_ctx *c)
b411b363
PR
2256{
2257 struct p_compressed_bm *p = (void*)h;
2258 unsigned long num_words;
2259 int len;
2260 int ok;
2261
2262 len = fill_bitmap_rle_bits(mdev, p, c);
2263
2264 if (len < 0)
f70af118 2265 return -EIO;
b411b363
PR
2266
2267 if (len) {
2268 DCBP_set_code(p, RLE_VLI_Bits);
2269 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
2270 sizeof(*p) + len, 0);
2271
2272 c->packets[0]++;
2273 c->bytes[0] += sizeof(*p) + len;
2274
2275 if (c->bit_offset >= c->bm_bits)
2276 len = 0; /* DONE */
2277 } else {
2278 /* was not compressible.
2279 * send a buffer full of plain text bits instead. */
2280 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
2281 len = num_words * sizeof(long);
2282 if (len)
2283 drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
2284 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
0b70a13d 2285 h, sizeof(struct p_header80) + len, 0);
b411b363
PR
2286 c->word_offset += num_words;
2287 c->bit_offset = c->word_offset * BITS_PER_LONG;
2288
2289 c->packets[1]++;
0b70a13d 2290 c->bytes[1] += sizeof(struct p_header80) + len;
b411b363
PR
2291
2292 if (c->bit_offset > c->bm_bits)
2293 c->bit_offset = c->bm_bits;
2294 }
f70af118
AG
2295 if (ok) {
2296 if (len == 0) {
2297 INFO_bm_xfer_stats(mdev, "send", c);
2298 return 0;
2299 } else
2300 return 1;
2301 }
2302 return -EIO;
b411b363
PR
2303}
2304
2305/* See the comment at receive_bitmap() */
2306int _drbd_send_bitmap(struct drbd_conf *mdev)
2307{
2308 struct bm_xfer_ctx c;
0b70a13d 2309 struct p_header80 *p;
f70af118 2310 int err;
b411b363 2311
81e84650 2312 ERR_IF(!mdev->bitmap) return false;
b411b363
PR
2313
2314 /* maybe we should use some per thread scratch page,
2315 * and allocate that during initial device creation? */
0b70a13d 2316 p = (struct p_header80 *) __get_free_page(GFP_NOIO);
b411b363
PR
2317 if (!p) {
2318 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
81e84650 2319 return false;
b411b363
PR
2320 }
2321
2322 if (get_ldev(mdev)) {
2323 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2324 dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2325 drbd_bm_set_all(mdev);
2326 if (drbd_bm_write(mdev)) {
 2327	/* write_bm did fail! Leave full sync flag set in meta data,
2328 * but otherwise process as per normal - need to tell other
2329 * side that a full resync is required! */
2330 dev_err(DEV, "Failed to write bitmap to disk!\n");
2331 } else {
2332 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2333 drbd_md_sync(mdev);
2334 }
2335 }
2336 put_ldev(mdev);
2337 }
2338
2339 c = (struct bm_xfer_ctx) {
2340 .bm_bits = drbd_bm_bits(mdev),
2341 .bm_words = drbd_bm_words(mdev),
2342 };
2343
2344 do {
f70af118
AG
2345 err = send_bitmap_rle_or_plain(mdev, p, &c);
2346 } while (err > 0);
b411b363
PR
2347
2348 free_page((unsigned long) p);
f70af118 2349 return err == 0;
b411b363
PR
2350}
2351
2352int drbd_send_bitmap(struct drbd_conf *mdev)
2353{
2354 int err;
2355
2356 if (!drbd_get_data_sock(mdev))
2357 return -1;
2358 err = !_drbd_send_bitmap(mdev);
2359 drbd_put_data_sock(mdev);
2360 return err;
2361}
2362
2363int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2364{
2365 int ok;
2366 struct p_barrier_ack p;
2367
2368 p.barrier = barrier_nr;
2369 p.set_size = cpu_to_be32(set_size);
2370
2371 if (mdev->state.conn < C_CONNECTED)
81e84650 2372 return false;
b411b363 2373 ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
0b70a13d 2374 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2375 return ok;
2376}
2377
2378/**
2379 * _drbd_send_ack() - Sends an ack packet
2380 * @mdev: DRBD device.
2381 * @cmd: Packet command code.
2382 * @sector: sector, needs to be in big endian byte order
2383 * @blksize: size in byte, needs to be in big endian byte order
2384 * @block_id: Id, big endian byte order
2385 */
2386static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2387 u64 sector,
2388 u32 blksize,
2389 u64 block_id)
2390{
2391 int ok;
2392 struct p_block_ack p;
2393
2394 p.sector = sector;
2395 p.block_id = block_id;
2396 p.blksize = blksize;
2397 p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2398
2399 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
81e84650 2400 return false;
b411b363 2401 ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
0b70a13d 2402 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2403 return ok;
2404}
2405
2b2bf214
LE
2406/* dp->sector and dp->block_id already/still in network byte order,
2407 * data_size is payload size according to dp->head,
2408 * and may need to be corrected for digest size. */
b411b363 2409int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2b2bf214 2410 struct p_data *dp, int data_size)
b411b363 2411{
2b2bf214
LE
2412 data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
2413 crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
b411b363
PR
2414 return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2415 dp->block_id);
2416}
2417
2418int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2419 struct p_block_req *rp)
2420{
2421 return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2422}
2423
2424/**
2425 * drbd_send_ack() - Sends an ack packet
2426 * @mdev: DRBD device.
2427 * @cmd: Packet command code.
2428 * @e: Epoch entry.
2429 */
2430int drbd_send_ack(struct drbd_conf *mdev,
2431 enum drbd_packets cmd, struct drbd_epoch_entry *e)
2432{
2433 return _drbd_send_ack(mdev, cmd,
2434 cpu_to_be64(e->sector),
2435 cpu_to_be32(e->size),
2436 e->block_id);
2437}
2438
2439/* This function misuses the block_id field to signal if the blocks
 2440 * are in sync or not. */
2441int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2442 sector_t sector, int blksize, u64 block_id)
2443{
2444 return _drbd_send_ack(mdev, cmd,
2445 cpu_to_be64(sector),
2446 cpu_to_be32(blksize),
2447 cpu_to_be64(block_id));
2448}
2449
2450int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2451 sector_t sector, int size, u64 block_id)
2452{
2453 int ok;
2454 struct p_block_req p;
2455
2456 p.sector = cpu_to_be64(sector);
2457 p.block_id = block_id;
2458 p.blksize = cpu_to_be32(size);
2459
2460 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
0b70a13d 2461 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2462 return ok;
2463}
2464
2465int drbd_send_drequest_csum(struct drbd_conf *mdev,
2466 sector_t sector, int size,
2467 void *digest, int digest_size,
2468 enum drbd_packets cmd)
2469{
2470 int ok;
2471 struct p_block_req p;
2472
2473 p.sector = cpu_to_be64(sector);
2474 p.block_id = BE_DRBD_MAGIC + 0xbeef;
2475 p.blksize = cpu_to_be32(size);
2476
2477 p.head.magic = BE_DRBD_MAGIC;
2478 p.head.command = cpu_to_be16(cmd);
0b70a13d 2479 p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
b411b363
PR
2480
2481 mutex_lock(&mdev->data.mutex);
2482
2483 ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2484 ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2485
2486 mutex_unlock(&mdev->data.mutex);
2487
2488 return ok;
2489}
2490
2491int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2492{
2493 int ok;
2494 struct p_block_req p;
2495
2496 p.sector = cpu_to_be64(sector);
2497 p.block_id = BE_DRBD_MAGIC + 0xbabe;
2498 p.blksize = cpu_to_be32(size);
2499
2500 ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
0b70a13d 2501 (struct p_header80 *)&p, sizeof(p));
b411b363
PR
2502 return ok;
2503}
2504
2505/* called on sndtimeo
81e84650
AG
2506 * returns false if we should retry,
2507 * true if we think connection is dead
b411b363
PR
2508 */
2509static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2510{
2511 int drop_it;
2512 /* long elapsed = (long)(jiffies - mdev->last_received); */
2513
2514 drop_it = mdev->meta.socket == sock
2515 || !mdev->asender.task
2516 || get_t_state(&mdev->asender) != Running
2517 || mdev->state.conn < C_CONNECTED;
2518
2519 if (drop_it)
81e84650 2520 return true;
b411b363
PR
2521
2522 drop_it = !--mdev->ko_count;
2523 if (!drop_it) {
2524 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2525 current->comm, current->pid, mdev->ko_count);
2526 request_ping(mdev);
2527 }
2528
2529 return drop_it; /* && (mdev->state == R_PRIMARY) */;
2530}
2531
2532/* The idea of sendpage seems to be to put some kind of reference
2533 * to the page into the skb, and to hand it over to the NIC. In
2534 * this process get_page() gets called.
2535 *
2536 * As soon as the page was really sent over the network put_page()
2537 * gets called by some part of the network layer. [ NIC driver? ]
2538 *
2539 * [ get_page() / put_page() increment/decrement the count. If count
2540 * reaches 0 the page will be freed. ]
2541 *
2542 * This works nicely with pages from FSs.
2543 * But this means that in protocol A we might signal IO completion too early!
2544 *
2545 * In order not to corrupt data during a resync we must make sure
 2546 * that we do not reuse our own buffer pages (EEs) too early, therefore
2547 * we have the net_ee list.
2548 *
 2549 * XFS still seems to have problems: it submits pages with page_count == 0!
2550 * As a workaround, we disable sendpage on pages
2551 * with page_count == 0 or PageSlab.
2552 */
2553static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2554 int offset, size_t size, unsigned msg_flags)
b411b363 2555{
ba11ad9a 2556 int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
b411b363
PR
2557 kunmap(page);
2558 if (sent == size)
2559 mdev->send_cnt += size>>9;
2560 return sent == size;
2561}
2562
2563static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
ba11ad9a 2564 int offset, size_t size, unsigned msg_flags)
b411b363
PR
2565{
2566 mm_segment_t oldfs = get_fs();
2567 int sent, ok;
2568 int len = size;
2569
2570 /* e.g. XFS meta- & log-data is in slab pages, which have a
2571 * page_count of 0 and/or have PageSlab() set.
2572 * we cannot use send_page for those, as that does get_page();
2573 * put_page(); and would cause either a VM_BUG directly, or
2574 * __page_cache_release a page that would actually still be referenced
2575 * by someone, leading to some obscure delayed Oops somewhere else. */
2576 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
ba11ad9a 2577 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
b411b363 2578
ba11ad9a 2579 msg_flags |= MSG_NOSIGNAL;
b411b363
PR
2580 drbd_update_congested(mdev);
2581 set_fs(KERNEL_DS);
2582 do {
2583 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2584 offset, len,
ba11ad9a 2585 msg_flags);
b411b363
PR
2586 if (sent == -EAGAIN) {
2587 if (we_should_drop_the_connection(mdev,
2588 mdev->data.socket))
2589 break;
2590 else
2591 continue;
2592 }
2593 if (sent <= 0) {
2594 dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2595 __func__, (int)size, len, sent);
2596 break;
2597 }
2598 len -= sent;
2599 offset += sent;
2600 } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2601 set_fs(oldfs);
2602 clear_bit(NET_CONGESTED, &mdev->flags);
2603
2604 ok = (len == 0);
2605 if (likely(ok))
2606 mdev->send_cnt += size>>9;
2607 return ok;
2608}
2609
2610static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2611{
2612 struct bio_vec *bvec;
2613 int i;
ba11ad9a 2614 /* hint all but last page with MSG_MORE */
b411b363
PR
2615 __bio_for_each_segment(bvec, bio, i, 0) {
2616 if (!_drbd_no_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2617 bvec->bv_offset, bvec->bv_len,
2618 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2619 return 0;
2620 }
2621 return 1;
2622}
2623
2624static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2625{
2626 struct bio_vec *bvec;
2627 int i;
ba11ad9a 2628 /* hint all but last page with MSG_MORE */
b411b363
PR
2629 __bio_for_each_segment(bvec, bio, i, 0) {
2630 if (!_drbd_send_page(mdev, bvec->bv_page,
ba11ad9a
LE
2631 bvec->bv_offset, bvec->bv_len,
2632 i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
b411b363
PR
2633 return 0;
2634 }
b411b363
PR
2635 return 1;
2636}
2637
45bb912b
LE
2638static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
2639{
2640 struct page *page = e->pages;
2641 unsigned len = e->size;
ba11ad9a 2642 /* hint all but last page with MSG_MORE */
45bb912b
LE
2643 page_chain_for_each(page) {
2644 unsigned l = min_t(unsigned, len, PAGE_SIZE);
ba11ad9a
LE
2645 if (!_drbd_send_page(mdev, page, 0, l,
2646 page_chain_next(page) ? MSG_MORE : 0))
45bb912b
LE
2647 return 0;
2648 len -= l;
2649 }
2650 return 1;
2651}
2652
76d2e7ec
PR
2653static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
2654{
2655 if (mdev->agreed_pro_version >= 95)
2656 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
76d2e7ec
PR
2657 (bi_rw & REQ_FUA ? DP_FUA : 0) |
2658 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
2659 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
2660 else
721a9602 2661 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
76d2e7ec
PR
2662}
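/*
 * Editor's sketch of the flag translation above: request flags are remapped
 * bit by bit onto wire flags, and the richer mapping is only used when the
 * agreed protocol version is new enough.  All constants and names here are
 * invented for the illustration; they are not the DRBD DP_* values.
 */
#include <stdio.h>

#define L_SYNC  (1u << 0)	/* local "sync" request flag (example) */
#define L_FUA   (1u << 1)	/* local "forced unit access" flag (example) */
#define W_SYNC  (1u << 0)	/* wire flag understood by every peer */
#define W_FUA   (1u << 4)	/* wire flag only newer peers understand */

static unsigned demo_flags_to_wire(unsigned local, int proto_version)
{
	if (proto_version >= 95)
		return (local & L_SYNC ? W_SYNC : 0) |
		       (local & L_FUA  ? W_FUA  : 0);
	return local & L_SYNC ? W_SYNC : 0;	/* old peers: sync bit only */
}

int main(void)
{
	printf("0x%x\n", demo_flags_to_wire(L_SYNC | L_FUA, 95));	/* 0x11 */
	printf("0x%x\n", demo_flags_to_wire(L_SYNC | L_FUA, 90));	/* 0x1 */
	return 0;
}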
2663
b411b363
PR
2664/* Used to send write requests
2665 * R_PRIMARY -> Peer (P_DATA)
2666 */
2667int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2668{
2669 int ok = 1;
2670 struct p_data p;
2671 unsigned int dp_flags = 0;
2672 void *dgb;
2673 int dgs;
2674
2675 if (!drbd_get_data_sock(mdev))
2676 return 0;
2677
2678 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2679 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2680
d5373389 2681 if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
0b70a13d
PR
2682 p.head.h80.magic = BE_DRBD_MAGIC;
2683 p.head.h80.command = cpu_to_be16(P_DATA);
2684 p.head.h80.length =
2685 cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2686 } else {
2687 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2688 p.head.h95.command = cpu_to_be16(P_DATA);
2689 p.head.h95.length =
2690 cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
2691 }
b411b363
PR
2692
2693 p.sector = cpu_to_be64(req->sector);
2694 p.block_id = (unsigned long)req;
2695 p.seq_num = cpu_to_be32(req->seq_num =
2696 atomic_add_return(1, &mdev->packet_seq));
b411b363 2697
76d2e7ec
PR
2698 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
2699
b411b363
PR
2700 if (mdev->state.conn >= C_SYNC_SOURCE &&
2701 mdev->state.conn <= C_PAUSED_SYNC_T)
2702 dp_flags |= DP_MAY_SET_IN_SYNC;
2703
2704 p.dp_flags = cpu_to_be32(dp_flags);
b411b363
PR
2705 set_bit(UNPLUG_REMOTE, &mdev->flags);
2706 ok = (sizeof(p) ==
ba11ad9a 2707 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
b411b363
PR
2708 if (ok && dgs) {
2709 dgb = mdev->int_dig_out;
45bb912b 2710 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
cab2f74b 2711 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2712 }
2713 if (ok) {
470be44a
LE
2714 /* For protocol A, we have to memcpy the payload into
2715 * socket buffers, as we may complete right away
2716 * as soon as we handed it over to tcp, at which point the data
2717 * pages may become invalid.
2718 *
2719 * For data-integrity enabled, we copy it as well, so we can be
2720 * sure that even if the bio pages may still be modified, it
2721 * won't change the data on the wire, thus if the digest checks
2722 * out ok after sending on this side, but does not fit on the
 2723	 * receiving side, we have surely detected corruption elsewhere.
2724 */
2725 if (mdev->net_conf->wire_protocol == DRBD_PROT_A || dgs)
b411b363
PR
2726 ok = _drbd_send_bio(mdev, req->master_bio);
2727 else
2728 ok = _drbd_send_zc_bio(mdev, req->master_bio);
470be44a
LE
2729
2730 /* double check digest, sometimes buffers have been modified in flight. */
2731 if (dgs > 0 && dgs <= 64) {
 2732	/* 64 byte, 512 bit, is the largest digest size
2733 * currently supported in kernel crypto. */
2734 unsigned char digest[64];
2735 drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest);
2736 if (memcmp(mdev->int_dig_out, digest, dgs)) {
2737 dev_warn(DEV,
2738 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
2739 (unsigned long long)req->sector, req->size);
2740 }
2741 } /* else if (dgs > 64) {
2742 ... Be noisy about digest too large ...
2743 } */
b411b363
PR
2744 }
2745
2746 drbd_put_data_sock(mdev);
bd26bfc5 2747
b411b363
PR
2748 return ok;
2749}
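/*
 * Editor's sketch of the "double check digest" idea above, using a toy
 * checksum instead of a crypto hash: hash the buffer before handing it to
 * the transport, hash it again afterwards, and warn if the two differ,
 * i.e. upper layers changed the pages while they were in flight.
 * toy_digest() is invented for this example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t toy_digest(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum = sum * 31 + buf[i];
	return sum;
}

int main(void)
{
	unsigned char payload[16] = "stable payload";
	uint32_t before, after;

	before = toy_digest(payload, sizeof(payload));
	payload[0] = 'S';	/* simulate a page modified during the send */
	after = toy_digest(payload, sizeof(payload));

	if (before != after)
		printf("digest mismatch: buffer modified during write\n");
	return 0;
}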
2750
2751/* answer packet, used to send data back for read requests:
2752 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
2753 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
2754 */
2755int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2756 struct drbd_epoch_entry *e)
2757{
2758 int ok;
2759 struct p_data p;
2760 void *dgb;
2761 int dgs;
2762
2763 dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2764 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2765
d5373389 2766 if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
0b70a13d
PR
2767 p.head.h80.magic = BE_DRBD_MAGIC;
2768 p.head.h80.command = cpu_to_be16(cmd);
2769 p.head.h80.length =
2770 cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2771 } else {
2772 p.head.h95.magic = BE_DRBD_MAGIC_BIG;
2773 p.head.h95.command = cpu_to_be16(cmd);
2774 p.head.h95.length =
2775 cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
2776 }
b411b363
PR
2777
2778 p.sector = cpu_to_be64(e->sector);
2779 p.block_id = e->block_id;
2780 /* p.seq_num = 0; No sequence numbers here.. */
2781
2782 /* Only called by our kernel thread.
2783 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2784 * in response to admin command or module unload.
2785 */
2786 if (!drbd_get_data_sock(mdev))
2787 return 0;
2788
0b70a13d 2789 ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
b411b363
PR
2790 if (ok && dgs) {
2791 dgb = mdev->int_dig_out;
45bb912b 2792 drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
cab2f74b 2793 ok = dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
b411b363
PR
2794 }
2795 if (ok)
45bb912b 2796 ok = _drbd_send_zc_ee(mdev, e);
b411b363
PR
2797
2798 drbd_put_data_sock(mdev);
bd26bfc5 2799
b411b363
PR
2800 return ok;
2801}
2802
73a01a18
PR
2803int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req)
2804{
2805 struct p_block_desc p;
2806
2807 p.sector = cpu_to_be64(req->sector);
2808 p.blksize = cpu_to_be32(req->size);
2809
2810 return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OUT_OF_SYNC, &p.head, sizeof(p));
2811}
2812
b411b363
PR
2813/*
2814 drbd_send distinguishes two cases:
2815
2816 Packets sent via the data socket "sock"
2817 and packets sent via the meta data socket "msock"
2818
2819 sock msock
2820 -----------------+-------------------------+------------------------------
2821 timeout conf.timeout / 2 conf.timeout / 2
2822 timeout action send a ping via msock Abort communication
2823 and close all sockets
2824*/
2825
2826/*
2827 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
2828 */
2829int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2830 void *buf, size_t size, unsigned msg_flags)
2831{
2832 struct kvec iov;
2833 struct msghdr msg;
2834 int rv, sent = 0;
2835
2836 if (!sock)
2837 return -1000;
2838
2839 /* THINK if (signal_pending) return ... ? */
2840
2841 iov.iov_base = buf;
2842 iov.iov_len = size;
2843
2844 msg.msg_name = NULL;
2845 msg.msg_namelen = 0;
2846 msg.msg_control = NULL;
2847 msg.msg_controllen = 0;
2848 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
2849
2850 if (sock == mdev->data.socket) {
2851 mdev->ko_count = mdev->net_conf->ko_count;
2852 drbd_update_congested(mdev);
2853 }
2854 do {
2855 /* STRANGE
2856 * tcp_sendmsg does _not_ use its size parameter at all ?
2857 *
2858 * -EAGAIN on timeout, -EINTR on signal.
2859 */
2860/* THINK
2861 * do we need to block DRBD_SIG if sock == &meta.socket ??
2862 * otherwise wake_asender() might interrupt some send_*Ack !
2863 */
2864 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2865 if (rv == -EAGAIN) {
2866 if (we_should_drop_the_connection(mdev, sock))
2867 break;
2868 else
2869 continue;
2870 }
2871 D_ASSERT(rv != 0);
2872 if (rv == -EINTR) {
2873 flush_signals(current);
2874 rv = 0;
2875 }
2876 if (rv < 0)
2877 break;
2878 sent += rv;
2879 iov.iov_base += rv;
2880 iov.iov_len -= rv;
2881 } while (sent < size);
2882
2883 if (sock == mdev->data.socket)
2884 clear_bit(NET_CONGESTED, &mdev->flags);
2885
2886 if (rv <= 0) {
2887 if (rv != -EAGAIN) {
2888 dev_err(DEV, "%s_sendmsg returned %d\n",
2889 sock == mdev->meta.socket ? "msock" : "sock",
2890 rv);
2891 drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2892 } else
2893 drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2894 }
2895
2896 return sent;
2897}
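/*
 * Editor's sketch (plain userspace sockets, not the kernel API) of the
 * partial-send loop above: keep sending until everything went out, retry on
 * EINTR, and on EAGAIN ask a policy hook whether the connection should be
 * considered dead.  send_all() and should_drop() are invented names;
 * should_drop() stands in for we_should_drop_the_connection().
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

static bool should_drop(void)
{
	return false;	/* placeholder policy: never give up */
}

static ssize_t send_all(int fd, const char *buf, size_t size)
{
	size_t sent = 0;

	while (sent < size) {
		ssize_t rv = send(fd, buf + sent, size - sent, MSG_NOSIGNAL);

		if (rv == -1 && errno == EINTR)
			continue;		/* interrupted: just retry */
		if (rv == -1 && errno == EAGAIN) {
			if (should_drop())
				break;		/* treat peer as dead */
			continue;		/* send timeout: retry */
		}
		if (rv <= 0)
			break;			/* hard error */
		sent += (size_t)rv;
	}
	return (ssize_t)sent;
}

int main(void)
{
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	printf("sent %zd bytes\n", send_all(sv[0], "hello", 5));	/* 5 */
	return 0;
}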
2898
2899static int drbd_open(struct block_device *bdev, fmode_t mode)
2900{
2901 struct drbd_conf *mdev = bdev->bd_disk->private_data;
2902 unsigned long flags;
2903 int rv = 0;
2904
2a48fc0a 2905 mutex_lock(&drbd_main_mutex);
b411b363
PR
2906 spin_lock_irqsave(&mdev->req_lock, flags);
2907 /* to have a stable mdev->state.role
2908 * and no race with updating open_cnt */
2909
2910 if (mdev->state.role != R_PRIMARY) {
2911 if (mode & FMODE_WRITE)
2912 rv = -EROFS;
2913 else if (!allow_oos)
2914 rv = -EMEDIUMTYPE;
2915 }
2916
2917 if (!rv)
2918 mdev->open_cnt++;
2919 spin_unlock_irqrestore(&mdev->req_lock, flags);
2a48fc0a 2920 mutex_unlock(&drbd_main_mutex);
b411b363
PR
2921
2922 return rv;
2923}
2924
2925static int drbd_release(struct gendisk *gd, fmode_t mode)
2926{
2927 struct drbd_conf *mdev = gd->private_data;
2a48fc0a 2928 mutex_lock(&drbd_main_mutex);
b411b363 2929 mdev->open_cnt--;
2a48fc0a 2930 mutex_unlock(&drbd_main_mutex);
b411b363
PR
2931 return 0;
2932}
2933
b411b363
PR
2934static void drbd_set_defaults(struct drbd_conf *mdev)
2935{
85f4cc17
PR
2936 /* This way we get a compile error when sync_conf grows,
 2937	 and we forget to initialize it here */
2938 mdev->sync_conf = (struct syncer_conf) {
2939 /* .rate = */ DRBD_RATE_DEF,
2940 /* .after = */ DRBD_AFTER_DEF,
2941 /* .al_extents = */ DRBD_AL_EXTENTS_DEF,
85f4cc17
PR
2942 /* .verify_alg = */ {}, 0,
2943 /* .cpu_mask = */ {}, 0,
2944 /* .csums_alg = */ {}, 0,
e756414f 2945 /* .use_rle = */ 0,
9a31d716
PR
2946 /* .on_no_data = */ DRBD_ON_NO_DATA_DEF,
2947 /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF,
2948 /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
2949 /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF,
0f0601f4
LE
2950 /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF,
2951 /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF
85f4cc17
PR
2952 };
2953
 2954	/* Have to do it that way, because the layout differs between
2955 big endian and little endian */
b411b363
PR
2956 mdev->state = (union drbd_state) {
2957 { .role = R_SECONDARY,
2958 .peer = R_UNKNOWN,
2959 .conn = C_STANDALONE,
2960 .disk = D_DISKLESS,
2961 .pdsk = D_UNKNOWN,
fb22c402
PR
2962 .susp = 0,
2963 .susp_nod = 0,
2964 .susp_fen = 0
b411b363
PR
2965 } };
2966}
2967
2968void drbd_init_set_defaults(struct drbd_conf *mdev)
2969{
2970 /* the memset(,0,) did most of this.
2971 * note: only assignments, no allocation in here */
2972
2973 drbd_set_defaults(mdev);
2974
b411b363
PR
2975 atomic_set(&mdev->ap_bio_cnt, 0);
2976 atomic_set(&mdev->ap_pending_cnt, 0);
2977 atomic_set(&mdev->rs_pending_cnt, 0);
2978 atomic_set(&mdev->unacked_cnt, 0);
2979 atomic_set(&mdev->local_cnt, 0);
2980 atomic_set(&mdev->net_cnt, 0);
2981 atomic_set(&mdev->packet_seq, 0);
2982 atomic_set(&mdev->pp_in_use, 0);
435f0740 2983 atomic_set(&mdev->pp_in_use_by_net, 0);
778f271d 2984 atomic_set(&mdev->rs_sect_in, 0);
0f0601f4 2985 atomic_set(&mdev->rs_sect_ev, 0);
759fbdfb 2986 atomic_set(&mdev->ap_in_flight, 0);
b411b363
PR
2987
2988 mutex_init(&mdev->md_io_mutex);
2989 mutex_init(&mdev->data.mutex);
2990 mutex_init(&mdev->meta.mutex);
2991 sema_init(&mdev->data.work.s, 0);
2992 sema_init(&mdev->meta.work.s, 0);
2993 mutex_init(&mdev->state_mutex);
2994
2995 spin_lock_init(&mdev->data.work.q_lock);
2996 spin_lock_init(&mdev->meta.work.q_lock);
2997
2998 spin_lock_init(&mdev->al_lock);
2999 spin_lock_init(&mdev->req_lock);
3000 spin_lock_init(&mdev->peer_seq_lock);
3001 spin_lock_init(&mdev->epoch_lock);
3002
3003 INIT_LIST_HEAD(&mdev->active_ee);
3004 INIT_LIST_HEAD(&mdev->sync_ee);
3005 INIT_LIST_HEAD(&mdev->done_ee);
3006 INIT_LIST_HEAD(&mdev->read_ee);
3007 INIT_LIST_HEAD(&mdev->net_ee);
3008 INIT_LIST_HEAD(&mdev->resync_reads);
3009 INIT_LIST_HEAD(&mdev->data.work.q);
3010 INIT_LIST_HEAD(&mdev->meta.work.q);
3011 INIT_LIST_HEAD(&mdev->resync_work.list);
3012 INIT_LIST_HEAD(&mdev->unplug_work.list);
e9e6f3ec 3013 INIT_LIST_HEAD(&mdev->go_diskless.list);
b411b363 3014 INIT_LIST_HEAD(&mdev->md_sync_work.list);
c4752ef1 3015 INIT_LIST_HEAD(&mdev->start_resync_work.list);
b411b363 3016 INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
0ced55a3 3017
794abb75 3018 mdev->resync_work.cb = w_resync_timer;
b411b363 3019 mdev->unplug_work.cb = w_send_write_hint;
e9e6f3ec 3020 mdev->go_diskless.cb = w_go_diskless;
b411b363
PR
3021 mdev->md_sync_work.cb = w_md_sync;
3022 mdev->bm_io_work.w.cb = w_bitmap_io;
370a43e7 3023 mdev->start_resync_work.cb = w_start_resync;
b411b363
PR
3024 init_timer(&mdev->resync_timer);
3025 init_timer(&mdev->md_sync_timer);
370a43e7 3026 init_timer(&mdev->start_resync_timer);
7fde2be9 3027 init_timer(&mdev->request_timer);
b411b363
PR
3028 mdev->resync_timer.function = resync_timer_fn;
3029 mdev->resync_timer.data = (unsigned long) mdev;
3030 mdev->md_sync_timer.function = md_sync_timer_fn;
3031 mdev->md_sync_timer.data = (unsigned long) mdev;
370a43e7
PR
3032 mdev->start_resync_timer.function = start_resync_timer_fn;
3033 mdev->start_resync_timer.data = (unsigned long) mdev;
7fde2be9
PR
3034 mdev->request_timer.function = request_timer_fn;
3035 mdev->request_timer.data = (unsigned long) mdev;
b411b363
PR
3036
3037 init_waitqueue_head(&mdev->misc_wait);
3038 init_waitqueue_head(&mdev->state_wait);
84dfb9f5 3039 init_waitqueue_head(&mdev->net_cnt_wait);
b411b363
PR
3040 init_waitqueue_head(&mdev->ee_wait);
3041 init_waitqueue_head(&mdev->al_wait);
3042 init_waitqueue_head(&mdev->seq_wait);
3043
3044 drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
3045 drbd_thread_init(mdev, &mdev->worker, drbd_worker);
3046 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
3047
3048 mdev->agreed_pro_version = PRO_VERSION_MAX;
2451fc3b 3049 mdev->write_ordering = WO_bdev_flush;
b411b363
PR
3050 mdev->resync_wenr = LC_FREE;
3051}
3052
3053void drbd_mdev_cleanup(struct drbd_conf *mdev)
3054{
1d7734a0 3055 int i;
b411b363
PR
3056 if (mdev->receiver.t_state != None)
3057 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
3058 mdev->receiver.t_state);
3059
3060 /* no need to lock it, I'm the only thread alive */
3061 if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
3062 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
3063 mdev->al_writ_cnt =
3064 mdev->bm_writ_cnt =
3065 mdev->read_cnt =
3066 mdev->recv_cnt =
3067 mdev->send_cnt =
3068 mdev->writ_cnt =
3069 mdev->p_size =
3070 mdev->rs_start =
3071 mdev->rs_total =
1d7734a0
LE
3072 mdev->rs_failed = 0;
3073 mdev->rs_last_events = 0;
0f0601f4 3074 mdev->rs_last_sect_ev = 0;
1d7734a0
LE
3075 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
3076 mdev->rs_mark_left[i] = 0;
3077 mdev->rs_mark_time[i] = 0;
3078 }
b411b363
PR
3079 D_ASSERT(mdev->net_conf == NULL);
3080
3081 drbd_set_my_capacity(mdev, 0);
3082 if (mdev->bitmap) {
3083 /* maybe never allocated. */
02d9a94b 3084 drbd_bm_resize(mdev, 0, 1);
b411b363
PR
3085 drbd_bm_cleanup(mdev);
3086 }
3087
3088 drbd_free_resources(mdev);
0778286a 3089 clear_bit(AL_SUSPENDED, &mdev->flags);
b411b363
PR
3090
3091 /*
3092 * currently we drbd_init_ee only on module load, so
3093 * we may do drbd_release_ee only on module unload!
3094 */
3095 D_ASSERT(list_empty(&mdev->active_ee));
3096 D_ASSERT(list_empty(&mdev->sync_ee));
3097 D_ASSERT(list_empty(&mdev->done_ee));
3098 D_ASSERT(list_empty(&mdev->read_ee));
3099 D_ASSERT(list_empty(&mdev->net_ee));
3100 D_ASSERT(list_empty(&mdev->resync_reads));
3101 D_ASSERT(list_empty(&mdev->data.work.q));
3102 D_ASSERT(list_empty(&mdev->meta.work.q));
3103 D_ASSERT(list_empty(&mdev->resync_work.list));
3104 D_ASSERT(list_empty(&mdev->unplug_work.list));
e9e6f3ec 3105 D_ASSERT(list_empty(&mdev->go_diskless.list));
2265b473
LE
3106
3107 drbd_set_defaults(mdev);
b411b363
PR
3108}
3109
3110
3111static void drbd_destroy_mempools(void)
3112{
3113 struct page *page;
3114
3115 while (drbd_pp_pool) {
3116 page = drbd_pp_pool;
3117 drbd_pp_pool = (struct page *)page_private(page);
3118 __free_page(page);
3119 drbd_pp_vacant--;
3120 }
3121
3122 /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
3123
3124 if (drbd_ee_mempool)
3125 mempool_destroy(drbd_ee_mempool);
3126 if (drbd_request_mempool)
3127 mempool_destroy(drbd_request_mempool);
3128 if (drbd_ee_cache)
3129 kmem_cache_destroy(drbd_ee_cache);
3130 if (drbd_request_cache)
3131 kmem_cache_destroy(drbd_request_cache);
3132 if (drbd_bm_ext_cache)
3133 kmem_cache_destroy(drbd_bm_ext_cache);
3134 if (drbd_al_ext_cache)
3135 kmem_cache_destroy(drbd_al_ext_cache);
3136
3137 drbd_ee_mempool = NULL;
3138 drbd_request_mempool = NULL;
3139 drbd_ee_cache = NULL;
3140 drbd_request_cache = NULL;
3141 drbd_bm_ext_cache = NULL;
3142 drbd_al_ext_cache = NULL;
3143
3144 return;
3145}
3146
3147static int drbd_create_mempools(void)
3148{
3149 struct page *page;
1816a2b4 3150 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
b411b363
PR
3151 int i;
3152
3153 /* prepare our caches and mempools */
3154 drbd_request_mempool = NULL;
3155 drbd_ee_cache = NULL;
3156 drbd_request_cache = NULL;
3157 drbd_bm_ext_cache = NULL;
3158 drbd_al_ext_cache = NULL;
3159 drbd_pp_pool = NULL;
3160
3161 /* caches */
3162 drbd_request_cache = kmem_cache_create(
3163 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
3164 if (drbd_request_cache == NULL)
3165 goto Enomem;
3166
3167 drbd_ee_cache = kmem_cache_create(
3168 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
3169 if (drbd_ee_cache == NULL)
3170 goto Enomem;
3171
3172 drbd_bm_ext_cache = kmem_cache_create(
3173 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
3174 if (drbd_bm_ext_cache == NULL)
3175 goto Enomem;
3176
3177 drbd_al_ext_cache = kmem_cache_create(
3178 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
3179 if (drbd_al_ext_cache == NULL)
3180 goto Enomem;
3181
3182 /* mempools */
3183 drbd_request_mempool = mempool_create(number,
3184 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
3185 if (drbd_request_mempool == NULL)
3186 goto Enomem;
3187
3188 drbd_ee_mempool = mempool_create(number,
3189 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2027ae1f 3190 if (drbd_ee_mempool == NULL)
b411b363
PR
3191 goto Enomem;
3192
3193 /* drbd's page pool */
3194 spin_lock_init(&drbd_pp_lock);
3195
3196 for (i = 0; i < number; i++) {
3197 page = alloc_page(GFP_HIGHUSER);
3198 if (!page)
3199 goto Enomem;
3200 set_page_private(page, (unsigned long)drbd_pp_pool);
3201 drbd_pp_pool = page;
3202 }
3203 drbd_pp_vacant = number;
3204
3205 return 0;
3206
3207Enomem:
3208 drbd_destroy_mempools(); /* in case we allocated some */
3209 return -ENOMEM;
3210}
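/*
 * Editor's sketch of the page-pool trick used above: pre-allocate N buffers
 * and chain them through a pointer stored inside each buffer (the kernel
 * code keeps that pointer in page_private()), so the free list needs no
 * extra bookkeeping structure.  pool_fill()/pool_get() are invented names
 * and malloc() stands in for alloc_page().
 */
#include <stdio.h>
#include <stdlib.h>

#define BUF_SIZE 4096

static void *pool;		/* head of the chained free buffers */
static int pool_vacant;

static int pool_fill(int number)
{
	int i;

	for (i = 0; i < number; i++) {
		void *buf = malloc(BUF_SIZE);

		if (!buf)
			return -1;
		*(void **)buf = pool;	/* stash the "next" pointer in the buffer */
		pool = buf;
		pool_vacant++;
	}
	return 0;
}

static void *pool_get(void)
{
	void *buf = pool;

	if (buf) {
		pool = *(void **)buf;
		pool_vacant--;
	}
	return buf;
}

int main(void)
{
	void *buf;

	if (pool_fill(8))
		return 1;
	buf = pool_get();
	printf("got %p, %d buffers still vacant\n", buf, pool_vacant);	/* 7 */
	free(buf);
	return 0;
}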
3211
3212static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
3213 void *unused)
3214{
3215 /* just so we have it. you never know what interesting things we
3216 * might want to do here some day...
3217 */
3218
3219 return NOTIFY_DONE;
3220}
3221
3222static struct notifier_block drbd_notifier = {
3223 .notifier_call = drbd_notify_sys,
3224};
3225
3226static void drbd_release_ee_lists(struct drbd_conf *mdev)
3227{
3228 int rr;
3229
3230 rr = drbd_release_ee(mdev, &mdev->active_ee);
3231 if (rr)
3232 dev_err(DEV, "%d EEs in active list found!\n", rr);
3233
3234 rr = drbd_release_ee(mdev, &mdev->sync_ee);
3235 if (rr)
3236 dev_err(DEV, "%d EEs in sync list found!\n", rr);
3237
3238 rr = drbd_release_ee(mdev, &mdev->read_ee);
3239 if (rr)
3240 dev_err(DEV, "%d EEs in read list found!\n", rr);
3241
3242 rr = drbd_release_ee(mdev, &mdev->done_ee);
3243 if (rr)
3244 dev_err(DEV, "%d EEs in done list found!\n", rr);
3245
3246 rr = drbd_release_ee(mdev, &mdev->net_ee);
3247 if (rr)
3248 dev_err(DEV, "%d EEs in net list found!\n", rr);
3249}
3250
3251/* caution. no locking.
3252 * currently only used from module cleanup code. */
3253static void drbd_delete_device(unsigned int minor)
3254{
3255 struct drbd_conf *mdev = minor_to_mdev(minor);
3256
3257 if (!mdev)
3258 return;
3259
3260 /* paranoia asserts */
3261 if (mdev->open_cnt != 0)
3262 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
3263 __FILE__ , __LINE__);
3264
3265 ERR_IF (!list_empty(&mdev->data.work.q)) {
3266 struct list_head *lp;
3267 list_for_each(lp, &mdev->data.work.q) {
3268 dev_err(DEV, "lp = %p\n", lp);
3269 }
3270 };
3271 /* end paranoia asserts */
3272
3273 del_gendisk(mdev->vdisk);
3274
3275 /* cleanup stuff that may have been allocated during
3276 * device (re-)configuration or state changes */
3277
3278 if (mdev->this_bdev)
3279 bdput(mdev->this_bdev);
3280
3281 drbd_free_resources(mdev);
3282
3283 drbd_release_ee_lists(mdev);
3284
3285 /* should be free'd on disconnect? */
3286 kfree(mdev->ee_hash);
3287 /*
3288 mdev->ee_hash_s = 0;
3289 mdev->ee_hash = NULL;
3290 */
3291
3292 lc_destroy(mdev->act_log);
3293 lc_destroy(mdev->resync);
3294
3295 kfree(mdev->p_uuid);
3296 /* mdev->p_uuid = NULL; */
3297
3298 kfree(mdev->int_dig_out);
3299 kfree(mdev->int_dig_in);
3300 kfree(mdev->int_dig_vv);
3301
3302 /* cleanup the rest that has been
3303 * allocated from drbd_new_device
3304 * and actually free the mdev itself */
3305 drbd_free_mdev(mdev);
3306}
3307
3308static void drbd_cleanup(void)
3309{
3310 unsigned int i;
3311
3312 unregister_reboot_notifier(&drbd_notifier);
3313
17a93f30
LE
3314 /* first remove proc,
 3315	 * drbdsetup uses its presence to detect
 3316	 * whether DRBD is loaded.
 3317	 * If we got stuck in proc removal
 3318	 * while netlink was already deregistered,
3319 * some drbdsetup commands may wait forever
3320 * for an answer.
3321 */
3322 if (drbd_proc)
3323 remove_proc_entry("drbd", NULL);
3324
b411b363
PR
3325 drbd_nl_cleanup();
3326
3327 if (minor_table) {
b411b363
PR
3328 i = minor_count;
3329 while (i--)
3330 drbd_delete_device(i);
3331 drbd_destroy_mempools();
3332 }
3333
3334 kfree(minor_table);
3335
3336 unregister_blkdev(DRBD_MAJOR, "drbd");
3337
3338 printk(KERN_INFO "drbd: module cleanup done.\n");
3339}
3340
3341/**
3342 * drbd_congested() - Callback for pdflush
3343 * @congested_data: User data
3344 * @bdi_bits: Bits pdflush is currently interested in
3345 *
3346 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
3347 */
3348static int drbd_congested(void *congested_data, int bdi_bits)
3349{
3350 struct drbd_conf *mdev = congested_data;
3351 struct request_queue *q;
3352 char reason = '-';
3353 int r = 0;
3354
1b881ef7 3355 if (!may_inc_ap_bio(mdev)) {
b411b363
PR
3356 /* DRBD has frozen IO */
3357 r = bdi_bits;
3358 reason = 'd';
3359 goto out;
3360 }
3361
3362 if (get_ldev(mdev)) {
3363 q = bdev_get_queue(mdev->ldev->backing_bdev);
3364 r = bdi_congested(&q->backing_dev_info, bdi_bits);
3365 put_ldev(mdev);
3366 if (r)
3367 reason = 'b';
3368 }
3369
3370 if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
3371 r |= (1 << BDI_async_congested);
3372 reason = reason == 'b' ? 'a' : 'n';
3373 }
3374
3375out:
3376 mdev->congestion_reason = reason;
3377 return r;
3378}
3379
3380struct drbd_conf *drbd_new_device(unsigned int minor)
3381{
3382 struct drbd_conf *mdev;
3383 struct gendisk *disk;
3384 struct request_queue *q;
3385
3386 /* GFP_KERNEL, we are outside of all write-out paths */
3387 mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
3388 if (!mdev)
3389 return NULL;
3390 if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
3391 goto out_no_cpumask;
3392
3393 mdev->minor = minor;
3394
3395 drbd_init_set_defaults(mdev);
3396
3397 q = blk_alloc_queue(GFP_KERNEL);
3398 if (!q)
3399 goto out_no_q;
3400 mdev->rq_queue = q;
3401 q->queuedata = mdev;
b411b363
PR
3402
3403 disk = alloc_disk(1);
3404 if (!disk)
3405 goto out_no_disk;
3406 mdev->vdisk = disk;
3407
81e84650 3408 set_disk_ro(disk, true);
b411b363
PR
3409
3410 disk->queue = q;
3411 disk->major = DRBD_MAJOR;
3412 disk->first_minor = minor;
3413 disk->fops = &drbd_ops;
3414 sprintf(disk->disk_name, "drbd%d", minor);
3415 disk->private_data = mdev;
3416
3417 mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3418 /* we have no partitions. we contain only ourselves. */
3419 mdev->this_bdev->bd_contains = mdev->this_bdev;
3420
3421 q->backing_dev_info.congested_fn = drbd_congested;
3422 q->backing_dev_info.congested_data = mdev;
3423
2f58dcfc 3424 blk_queue_make_request(q, drbd_make_request);
1816a2b4 3425 blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9);
b411b363
PR
3426 blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3427 blk_queue_merge_bvec(q, drbd_merge_bvec);
7eaceacc 3428 q->queue_lock = &mdev->req_lock;
b411b363
PR
3429
3430 mdev->md_io_page = alloc_page(GFP_KERNEL);
3431 if (!mdev->md_io_page)
3432 goto out_no_io_page;
3433
3434 if (drbd_bm_init(mdev))
3435 goto out_no_bitmap;
3436 /* no need to lock access, we are still initializing this minor device. */
3437 if (!tl_init(mdev))
3438 goto out_no_tl;
3439
3440 mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3441 if (!mdev->app_reads_hash)
3442 goto out_no_app_reads;
3443
3444 mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3445 if (!mdev->current_epoch)
3446 goto out_no_epoch;
3447
3448 INIT_LIST_HEAD(&mdev->current_epoch->list);
3449 mdev->epochs = 1;
3450
3451 return mdev;
3452
3453/* out_whatever_else:
3454 kfree(mdev->current_epoch); */
3455out_no_epoch:
3456 kfree(mdev->app_reads_hash);
3457out_no_app_reads:
3458 tl_cleanup(mdev);
3459out_no_tl:
3460 drbd_bm_cleanup(mdev);
3461out_no_bitmap:
3462 __free_page(mdev->md_io_page);
3463out_no_io_page:
3464 put_disk(disk);
3465out_no_disk:
3466 blk_cleanup_queue(q);
3467out_no_q:
3468 free_cpumask_var(mdev->cpu_mask);
3469out_no_cpumask:
3470 kfree(mdev);
3471 return NULL;
3472}
3473
3474/* counterpart of drbd_new_device.
3475 * last part of drbd_delete_device. */
3476void drbd_free_mdev(struct drbd_conf *mdev)
3477{
3478 kfree(mdev->current_epoch);
3479 kfree(mdev->app_reads_hash);
3480 tl_cleanup(mdev);
3481 if (mdev->bitmap) /* should no longer be there. */
3482 drbd_bm_cleanup(mdev);
3483 __free_page(mdev->md_io_page);
3484 put_disk(mdev->vdisk);
3485 blk_cleanup_queue(mdev->rq_queue);
3486 free_cpumask_var(mdev->cpu_mask);
3719094e 3487 drbd_free_tl_hash(mdev);
3488 kfree(mdev);
3489}
3490
3491
3492int __init drbd_init(void)
3493{
3494 int err;
3495
3496 if (sizeof(struct p_handshake) != 80) {
3497 printk(KERN_ERR
3498 "drbd: never change the size or layout "
3499 "of the HandShake packet.\n");
3500 return -EINVAL;
3501 }
3502
2b8a90b5 3503 if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
3504 printk(KERN_ERR
3505 "drbd: invalid minor_count (%d)\n", minor_count);
3506#ifdef MODULE
3507 return -EINVAL;
3508#else
3509 minor_count = 8;
3510#endif
3511 }
3512
3513 err = drbd_nl_init();
3514 if (err)
3515 return err;
3516
3517 err = register_blkdev(DRBD_MAJOR, "drbd");
3518 if (err) {
3519 printk(KERN_ERR
3520 "drbd: unable to register block device major %d\n",
3521 DRBD_MAJOR);
3522 return err;
3523 }
3524
3525 register_reboot_notifier(&drbd_notifier);
3526
3527 /*
3528 * allocate all necessary structs
3529 */
3530 err = -ENOMEM;
3531
3532 init_waitqueue_head(&drbd_pp_wait);
3533
3534 drbd_proc = NULL; /* play safe for drbd_cleanup */
3535 minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3536 GFP_KERNEL);
3537 if (!minor_table)
3538 goto Enomem;
3539
3540 err = drbd_create_mempools();
3541 if (err)
3542 goto Enomem;
3543
8c484ee4 3544 drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3545 if (!drbd_proc) {
3546 printk(KERN_ERR "drbd: unable to register proc file\n");
3547 goto Enomem;
3548 }
3549
3550 rwlock_init(&global_state_lock);
3551
3552 printk(KERN_INFO "drbd: initialized. "
3553 "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3554 API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3555 printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3556 printk(KERN_INFO "drbd: registered as block device major %d\n",
3557 DRBD_MAJOR);
3558 printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3559
3560 return 0; /* Success! */
3561
3562Enomem:
3563 drbd_cleanup();
3564 if (err == -ENOMEM)
3565 /* currently always the case */
3566 printk(KERN_ERR "drbd: ran out of memory\n");
3567 else
3568 printk(KERN_ERR "drbd: initialization failure\n");
3569 return err;
3570}
3571
3572void drbd_free_bc(struct drbd_backing_dev *ldev)
3573{
3574 if (ldev == NULL)
3575 return;
3576
3577 blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3578 blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
3579
3580 kfree(ldev);
3581}
3582
3583void drbd_free_sock(struct drbd_conf *mdev)
3584{
3585 if (mdev->data.socket) {
4589d7f8 3586 mutex_lock(&mdev->data.mutex);
3587 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3588 sock_release(mdev->data.socket);
3589 mdev->data.socket = NULL;
4589d7f8 3590 mutex_unlock(&mdev->data.mutex);
3591 }
3592 if (mdev->meta.socket) {
4589d7f8 3593 mutex_lock(&mdev->meta.mutex);
3594 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3595 sock_release(mdev->meta.socket);
3596 mdev->meta.socket = NULL;
4589d7f8 3597 mutex_unlock(&mdev->meta.mutex);
3598 }
3599}
3600
3601
3602void drbd_free_resources(struct drbd_conf *mdev)
3603{
3604 crypto_free_hash(mdev->csums_tfm);
3605 mdev->csums_tfm = NULL;
3606 crypto_free_hash(mdev->verify_tfm);
3607 mdev->verify_tfm = NULL;
3608 crypto_free_hash(mdev->cram_hmac_tfm);
3609 mdev->cram_hmac_tfm = NULL;
3610 crypto_free_hash(mdev->integrity_w_tfm);
3611 mdev->integrity_w_tfm = NULL;
3612 crypto_free_hash(mdev->integrity_r_tfm);
3613 mdev->integrity_r_tfm = NULL;
3614
3615 drbd_free_sock(mdev);
3616
3617 __no_warn(local,
3618 drbd_free_bc(mdev->ldev);
3619 mdev->ldev = NULL;);
3620}
3621
3622/* meta data management */
3623
3624struct meta_data_on_disk {
3625 u64 la_size; /* last agreed size. */
3626 u64 uuid[UI_SIZE]; /* UUIDs. */
3627 u64 device_uuid;
3628 u64 reserved_u64_1;
3629 u32 flags; /* MDF */
3630 u32 magic;
3631 u32 md_size_sect;
3632 u32 al_offset; /* offset to this block */
3633 u32 al_nr_extents; /* important for restoring the AL */
3634 /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3635 u32 bm_offset; /* offset to the bitmap, from here */
3636 u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */
3637 u32 reserved_u32[4];
3638
3639} __packed;
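/*
 * All multi-byte fields of struct meta_data_on_disk are stored big-endian
 * (see the cpu_to_be*() conversions in drbd_md_sync() below); the structure
 * is zero-padded to the full 512-byte sector that holds the super block.
 */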
3640
3641/**
3642 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3643 * @mdev: DRBD device.
3644 */
3645void drbd_md_sync(struct drbd_conf *mdev)
3646{
3647 struct meta_data_on_disk *buffer;
3648 sector_t sector;
3649 int i;
3650
3651 del_timer(&mdev->md_sync_timer);
3652 /* timer may be rearmed by drbd_md_mark_dirty() now. */
3653 if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3654 return;
3655
 3656	/* We use D_FAILED here and not D_ATTACHING because we try to write
 3657	 * metadata even if we detach due to a disk failure! */
3658 if (!get_ldev_if_state(mdev, D_FAILED))
3659 return;
3660
3661 mutex_lock(&mdev->md_io_mutex);
3662 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3663 memset(buffer, 0, 512);
3664
3665 buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3666 for (i = UI_CURRENT; i < UI_SIZE; i++)
3667 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3668 buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3669 buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3670
3671 buffer->md_size_sect = cpu_to_be32(mdev->ldev->md.md_size_sect);
3672 buffer->al_offset = cpu_to_be32(mdev->ldev->md.al_offset);
3673 buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3674 buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3675 buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3676
3677 buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3678
3679 D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3680 sector = mdev->ldev->md.md_offset;
3681
3f3a9b84 3682 if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3683 /* this was a try anyways ... */
3684 dev_err(DEV, "meta data update failed!\n");
81e84650 3685 drbd_chk_io_error(mdev, 1, true);
3686 }
3687
3688 /* Update mdev->ldev->md.la_size_sect,
3689 * since we updated it on metadata. */
3690 mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3691
3692 mutex_unlock(&mdev->md_io_mutex);
3693 put_ldev(mdev);
3694}
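/*
 * The super block is written synchronously into the single 512-byte sector
 * at md.md_offset; drbd_md_sync() returns early unless MD_DIRTY was set, so
 * it is cheap to call "just in case" after a series of meta data updates.
 */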
3695
3696/**
3697 * drbd_md_read() - Reads in the meta data super block
3698 * @mdev: DRBD device.
3699 * @bdev: Device from which the meta data should be read in.
3700 *
116676ca 3701 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
3702 * something goes wrong. Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3703 */
3704int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3705{
3706 struct meta_data_on_disk *buffer;
3707 int i, rv = NO_ERROR;
3708
3709 if (!get_ldev_if_state(mdev, D_ATTACHING))
3710 return ERR_IO_MD_DISK;
3711
3712 mutex_lock(&mdev->md_io_mutex);
3713 buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3714
3715 if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
25985edc 3716 /* NOTE: can't do normal error processing here as this is
3717 called BEFORE disk is attached */
3718 dev_err(DEV, "Error while reading metadata.\n");
3719 rv = ERR_IO_MD_DISK;
3720 goto err;
3721 }
3722
3723 if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3724 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3725 rv = ERR_MD_INVALID;
3726 goto err;
3727 }
3728 if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3729 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3730 be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3731 rv = ERR_MD_INVALID;
3732 goto err;
3733 }
3734 if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3735 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3736 be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3737 rv = ERR_MD_INVALID;
3738 goto err;
3739 }
3740 if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3741 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3742 be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3743 rv = ERR_MD_INVALID;
3744 goto err;
3745 }
3746
3747 if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3748 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3749 be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3750 rv = ERR_MD_INVALID;
3751 goto err;
3752 }
3753
3754 bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3755 for (i = UI_CURRENT; i < UI_SIZE; i++)
3756 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3757 bdev->md.flags = be32_to_cpu(buffer->flags);
3758 mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3759 bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3760
3761 if (mdev->sync_conf.al_extents < 7)
3762 mdev->sync_conf.al_extents = 127;
3763
3764 err:
3765 mutex_unlock(&mdev->md_io_mutex);
3766 put_ldev(mdev);
3767
3768 return rv;
3769}
3770
3771/**
3772 * drbd_md_mark_dirty() - Mark meta data super block as dirty
3773 * @mdev: DRBD device.
3774 *
3775 * Call this function if you change anything that should be written to
3776 * the meta-data super block. This function sets MD_DIRTY, and starts a
3777 * timer that ensures that within five seconds you have to call drbd_md_sync().
3778 */
ca0e6098 3779#ifdef DEBUG
3780void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3781{
3782 if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3783 mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3784 mdev->last_md_mark_dirty.line = line;
3785 mdev->last_md_mark_dirty.func = func;
3786 }
3787}
3788#else
3789void drbd_md_mark_dirty(struct drbd_conf *mdev)
3790{
ee15b038 3791 if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
ca0e6098 3792 mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
b411b363 3793}
ee15b038 3794#endif
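/*
 * Illustrative sketch of the dirty/sync protocol described above (not part
 * of any call path in this file; the flag used is only an example): update
 * the in-core meta data, mark it dirty, and rely on the md_sync_timer /
 * w_md_sync() pair below to write it out within about five seconds -- or
 * call drbd_md_sync() directly when the update must hit stable storage now.
 */
#if 0
static void example_md_update(struct drbd_conf *mdev)
{
	if (!get_ldev(mdev))			/* need a stable mdev->ldev */
		return;
	drbd_md_set_flag(mdev, MDF_FULL_SYNC);	/* calls drbd_md_mark_dirty() */
	drbd_md_sync(mdev);			/* or: let the timer do it */
	put_ldev(mdev);
}
#endif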
3795
3796static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3797{
3798 int i;
3799
62b0da3a 3800 for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
b411b363 3801 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3802}
3803
3804void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3805{
3806 if (idx == UI_CURRENT) {
3807 if (mdev->state.role == R_PRIMARY)
3808 val |= 1;
3809 else
3810 val &= ~((u64)1);
3811
3812 drbd_set_ed_uuid(mdev, val);
3813 }
3814
3815 mdev->ldev->md.uuid[idx] = val;
3816 drbd_md_mark_dirty(mdev);
3817}
3818
3819
3820void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3821{
3822 if (mdev->ldev->md.uuid[idx]) {
3823 drbd_uuid_move_history(mdev);
3824 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3825 }
3826 _drbd_uuid_set(mdev, idx, val);
3827}
3828
3829/**
3830 * drbd_uuid_new_current() - Creates a new current UUID
3831 * @mdev: DRBD device.
3832 *
3833 * Creates a new current UUID, and rotates the old current UUID into
3834 * the bitmap slot. Causes an incremental resync upon next connect.
3835 */
3836void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3837{
3838 u64 val;
3839 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3840
3841 if (bm_uuid)
3842 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 3843
b411b363 3844 mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3845
3846 get_random_bytes(&val, sizeof(u64));
3847 _drbd_uuid_set(mdev, UI_CURRENT, val);
62b0da3a 3848 drbd_print_uuids(mdev, "new current UUID");
3849 /* get it to stable storage _now_ */
3850 drbd_md_sync(mdev);
3851}
3852
3853void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3854{
3855 if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3856 return;
3857
3858 if (val == 0) {
3859 drbd_uuid_move_history(mdev);
3860 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3861 mdev->ldev->md.uuid[UI_BITMAP] = 0;
b411b363 3862 } else {
3863 unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3864 if (bm_uuid)
3865 dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
b411b363 3866
62b0da3a 3867 mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3868 }
3869 drbd_md_mark_dirty(mdev);
3870}
3871
3872/**
3873 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3874 * @mdev: DRBD device.
3875 *
3876 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3877 */
3878int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3879{
3880 int rv = -EIO;
3881
3882 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3883 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3884 drbd_md_sync(mdev);
3885 drbd_bm_set_all(mdev);
3886
3887 rv = drbd_bm_write(mdev);
3888
3889 if (!rv) {
3890 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3891 drbd_md_sync(mdev);
3892 }
3893
3894 put_ldev(mdev);
3895 }
3896
3897 return rv;
3898}
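/*
 * The MDF_FULL_SYNC flag brackets the bitmap write above on purpose: it is
 * set and flushed to the super block before the bitmap is written, and only
 * cleared again after the write succeeded.  If we crash in between, the next
 * attach still sees MDF_FULL_SYNC and repeats the full sync, so a partially
 * written bitmap cannot be trusted by accident.
 */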
3899
3900/**
3901 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3902 * @mdev: DRBD device.
3903 *
3904 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3905 */
3906int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3907{
3908 int rv = -EIO;
3909
0778286a 3910 drbd_resume_al(mdev);
3911 if (get_ldev_if_state(mdev, D_ATTACHING)) {
3912 drbd_bm_clear_all(mdev);
3913 rv = drbd_bm_write(mdev);
3914 put_ldev(mdev);
3915 }
3916
3917 return rv;
3918}
3919
3920static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3921{
3922 struct bm_io_work *work = container_of(w, struct bm_io_work, w);
02851e9f 3923 int rv = -EIO;
3924
3925 D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3926
02851e9f 3927 if (get_ldev(mdev)) {
20ceb2b2 3928 drbd_bm_lock(mdev, work->why, work->flags);
3929 rv = work->io_fn(mdev);
3930 drbd_bm_unlock(mdev);
3931 put_ldev(mdev);
3932 }
3933
3934 clear_bit(BITMAP_IO, &mdev->flags);
127b3178 3935 smp_mb__after_clear_bit();
3936 wake_up(&mdev->misc_wait);
3937
3938 if (work->done)
3939 work->done(mdev, rv);
3940
3941 clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3942 work->why = NULL;
20ceb2b2 3943 work->flags = 0;
3944
3945 return 1;
3946}
3947
3948void drbd_ldev_destroy(struct drbd_conf *mdev)
3949{
3950 lc_destroy(mdev->resync);
3951 mdev->resync = NULL;
3952 lc_destroy(mdev->act_log);
3953 mdev->act_log = NULL;
3954 __no_warn(local,
3955 drbd_free_bc(mdev->ldev);
3956 mdev->ldev = NULL;);
3957
3958 if (mdev->md_io_tmpp) {
3959 __free_page(mdev->md_io_tmpp);
3960 mdev->md_io_tmpp = NULL;
3961 }
3962 clear_bit(GO_DISKLESS, &mdev->flags);
3963}
3964
3965static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3966{
3967 D_ASSERT(mdev->state.disk == D_FAILED);
3968 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3969 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3970 * the protected members anymore, though, so once put_ldev reaches zero
3971 * again, it will be safe to free them. */
e9e6f3ec 3972 drbd_force_state(mdev, NS(disk, D_DISKLESS));
3973 return 1;
3974}
3975
3976void drbd_go_diskless(struct drbd_conf *mdev)
3977{
3978 D_ASSERT(mdev->state.disk == D_FAILED);
3979 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
9d282875 3980 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
3981}
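/*
 * The GO_DISKLESS bit makes sure the go_diskless work is queued only once
 * per detach; w_go_diskless() then forces the D_FAILED -> D_DISKLESS state
 * transition from worker context, and drbd_ldev_destroy() (above) clears the
 * bit again once the backing device has actually been released.
 */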
3982
3983/**
3984 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3985 * @mdev: DRBD device.
3986 * @io_fn: IO callback to be called when bitmap IO is possible
3987 * @done: callback to be called after the bitmap IO was performed
3988 * @why: Descriptive text of the reason for doing the IO
3989 *
 3990 * While IO on the bitmap happens we freeze application IO, thus ensuring
 3991 * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3992 * called from worker context. It MUST NOT be used while a previous such
3993 * work is still pending!
3994 */
3995void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3996 int (*io_fn)(struct drbd_conf *),
3997 void (*done)(struct drbd_conf *, int),
20ceb2b2 3998 char *why, enum bm_flag flags)
3999{
4000 D_ASSERT(current == mdev->worker.task);
4001
4002 D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
4003 D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
4004 D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
4005 if (mdev->bm_io_work.why)
4006 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
4007 why, mdev->bm_io_work.why);
4008
4009 mdev->bm_io_work.io_fn = io_fn;
4010 mdev->bm_io_work.done = done;
4011 mdev->bm_io_work.why = why;
20ceb2b2 4012 mdev->bm_io_work.flags = flags;
b411b363 4013
22afd7ee 4014 spin_lock_irq(&mdev->req_lock);
4015 set_bit(BITMAP_IO, &mdev->flags);
4016 if (atomic_read(&mdev->ap_bio_cnt) == 0) {
127b3178 4017 if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
b411b363 4018 drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
b411b363 4019 }
22afd7ee 4020 spin_unlock_irq(&mdev->req_lock);
4021}
4022
4023/**
4024 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
4025 * @mdev: DRBD device.
4026 * @io_fn: IO callback to be called when bitmap IO is possible
4027 * @why: Descriptive text of the reason for doing the IO
4028 *
 4029 * Freezes application IO while the actual IO operation runs. This
 4030 * function MAY NOT be called from worker context.
4031 */
4032int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
4033 char *why, enum bm_flag flags)
4034{
4035 int rv;
4036
4037 D_ASSERT(current != mdev->worker.task);
4038
4039 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4040 drbd_suspend_io(mdev);
b411b363 4041
20ceb2b2 4042 drbd_bm_lock(mdev, why, flags);
4043 rv = io_fn(mdev);
4044 drbd_bm_unlock(mdev);
4045
4046 if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
4047 drbd_resume_io(mdev);
4048
4049 return rv;
4050}
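/*
 * Illustrative sketch of the two entry points above (not a call path in this
 * file; the "why" string and the flag choice are only examples): from worker
 * context the operation has to be queued, from any other context it can be
 * run synchronously.
 */
#if 0
static void example_full_sync_request(struct drbd_conf *mdev)
{
	if (current == mdev->worker.task)
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
				     "example: set_n_write", BM_LOCKED_SET_ALLOWED);
	else
		drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			       "example: set_n_write", BM_LOCKED_SET_ALLOWED);
}
#endif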
4051
4052void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4053{
4054 if ((mdev->ldev->md.flags & flag) != flag) {
4055 drbd_md_mark_dirty(mdev);
4056 mdev->ldev->md.flags |= flag;
4057 }
4058}
4059
4060void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
4061{
4062 if ((mdev->ldev->md.flags & flag) != 0) {
4063 drbd_md_mark_dirty(mdev);
4064 mdev->ldev->md.flags &= ~flag;
4065 }
4066}
4067int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
4068{
4069 return (bdev->md.flags & flag) != 0;
4070}
4071
4072static void md_sync_timer_fn(unsigned long data)
4073{
4074 struct drbd_conf *mdev = (struct drbd_conf *) data;
4075
4076 drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
4077}
4078
4079static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
4080{
4081 dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
4082#ifdef DEBUG
4083 dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
4084 mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
4085#endif
b411b363 4086 drbd_md_sync(mdev);
4087 return 1;
4088}
4089
4090#ifdef CONFIG_DRBD_FAULT_INJECTION
4091/* Fault insertion support including random number generator shamelessly
4092 * stolen from kernel/rcutorture.c */
4093struct fault_random_state {
4094 unsigned long state;
4095 unsigned long count;
4096};
4097
4098#define FAULT_RANDOM_MULT 39916801 /* prime */
4099#define FAULT_RANDOM_ADD 479001701 /* prime */
4100#define FAULT_RANDOM_REFRESH 10000
4101
4102/*
4103 * Crude but fast random-number generator. Uses a linear congruential
4104 * generator, with occasional help from get_random_bytes().
4105 */
4106static unsigned long
4107_drbd_fault_random(struct fault_random_state *rsp)
4108{
4109 long refresh;
4110
49829ea7 4111 if (!rsp->count--) {
4112 get_random_bytes(&refresh, sizeof(refresh));
4113 rsp->state += refresh;
4114 rsp->count = FAULT_RANDOM_REFRESH;
4115 }
4116 rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
4117 return swahw32(rsp->state);
4118}
4119
4120static char *
4121_drbd_fault_str(unsigned int type) {
4122 static char *_faults[] = {
4123 [DRBD_FAULT_MD_WR] = "Meta-data write",
4124 [DRBD_FAULT_MD_RD] = "Meta-data read",
4125 [DRBD_FAULT_RS_WR] = "Resync write",
4126 [DRBD_FAULT_RS_RD] = "Resync read",
4127 [DRBD_FAULT_DT_WR] = "Data write",
4128 [DRBD_FAULT_DT_RD] = "Data read",
4129 [DRBD_FAULT_DT_RA] = "Data read ahead",
4130 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
4131 [DRBD_FAULT_AL_EE] = "EE allocation",
4132 [DRBD_FAULT_RECEIVE] = "receive data corruption",
4133 };
4134
4135 return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
4136}
4137
4138unsigned int
4139_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
4140{
4141 static struct fault_random_state rrs = {0, 0};
4142
4143 unsigned int ret = (
4144 (fault_devs == 0 ||
4145 ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
4146 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
4147
4148 if (ret) {
4149 fault_count++;
4150
7383506c 4151 if (__ratelimit(&drbd_ratelimit_state))
4152 dev_warn(DEV, "***Simulating %s failure\n",
4153 _drbd_fault_str(type));
4154 }
4155
4156 return ret;
4157}
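/*
 * In other words: a request of the given type is failed with a probability
 * of roughly fault_rate percent, but only on minors selected by the
 * fault_devs bitmask (fault_devs == 0 means "all devices").  Both are drbd
 * module parameters and, given writable permissions, can be tuned at runtime
 * via /sys/module/drbd/parameters/ while fault injection is compiled in.
 */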
4158#endif
4159
4160const char *drbd_buildtag(void)
4161{
 4162	/* When DRBD is built from external sources, this holds a reference to
 4163	   the git hash of the source code. */
4164
4165 static char buildtag[38] = "\0uilt-in";
4166
4167 if (buildtag[0] == 0) {
4168#ifdef CONFIG_MODULES
4169 if (THIS_MODULE != NULL)
4170 sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
4171 else
4172#endif
4173 buildtag[0] = 'b';
4174 }
4175
4176 return buildtag;
4177}
4178
4179module_init(drbd_init)
4180module_exit(drbd_cleanup)
4181
4182EXPORT_SYMBOL(drbd_conn_str);
4183EXPORT_SYMBOL(drbd_role_str);
4184EXPORT_SYMBOL(drbd_disk_str);
4185EXPORT_SYMBOL(drbd_set_st_err_str);