drbd: Fixed an issue with AHEAD -> SYNC_SOURCE transitions
[deliverable/linux.git] / drivers/block/drbd/drbd_worker.c
1/*
2 drbd_worker.c
3
4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
13 any later version.
14
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24 */
25
b411b363 26#include <linux/module.h>
27#include <linux/drbd.h>
28#include <linux/sched.h>
29#include <linux/wait.h>
30#include <linux/mm.h>
31#include <linux/memcontrol.h>
32#include <linux/mm_inline.h>
33#include <linux/slab.h>
34#include <linux/random.h>
35#include <linux/string.h>
36#include <linux/scatterlist.h>
37
38#include "drbd_int.h"
39#include "drbd_req.h"
b411b363 40
b411b363 41static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
42static int w_make_resync_request(struct drbd_conf *mdev,
43 struct drbd_work *w, int cancel);
44
45
46
47/* defined here:
48 drbd_md_io_complete
45bb912b 49 drbd_endio_sec
50 drbd_endio_pri
51
52 * more endio handlers:
53 atodb_endio in drbd_actlog.c
54 drbd_bm_async_io_complete in drbd_bitmap.c
55
56 * For all these callbacks, note the following:
57 * The callbacks will be called in irq context by the IDE drivers,
58 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
59 * Try to get the locking right :)
60 *
61 */
62
63
64/* About the global_state_lock
65 Each state transition on an device holds a read lock. In case we have
66 to evaluate the sync after dependencies, we grab a write lock, because
67 we need stable states on all devices for that. */
68rwlock_t global_state_lock;
69
70/* used for synchronous meta data and bitmap IO
71 * submitted by drbd_md_sync_page_io()
72 */
73void drbd_md_io_complete(struct bio *bio, int error)
74{
75 struct drbd_md_io *md_io;
76
77 md_io = (struct drbd_md_io *)bio->bi_private;
78 md_io->error = error;
79
80 complete(&md_io->event);
81}
82
83/* reads on behalf of the partner,
84 * "submitted" by the receiver
85 */
45bb912b 86void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
87{
88 unsigned long flags = 0;
45bb912b 89 struct drbd_conf *mdev = e->mdev;
90
91 D_ASSERT(e->block_id != ID_VACANT);
92
93 spin_lock_irqsave(&mdev->req_lock, flags);
94 mdev->read_cnt += e->size >> 9;
95 list_del(&e->w.list);
96 if (list_empty(&mdev->read_ee))
97 wake_up(&mdev->ee_wait);
45bb912b 98 if (test_bit(__EE_WAS_ERROR, &e->flags))
81e84650 99 __drbd_chk_io_error(mdev, false);
100 spin_unlock_irqrestore(&mdev->req_lock, flags);
101
102 drbd_queue_work(&mdev->data.work, &e->w);
103 put_ldev(mdev);
104}
105
106/* writes on behalf of the partner, or resync writes,
107 * "submitted" by the receiver, final stage. */
108static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
109{
110 unsigned long flags = 0;
45bb912b 111 struct drbd_conf *mdev = e->mdev;
112 sector_t e_sector;
113 int do_wake;
114 int is_syncer_req;
115 int do_al_complete_io;
b411b363 116
117 D_ASSERT(e->block_id != ID_VACANT);
118
119 /* after we moved e to done_ee,
120 * we may no longer access it,
121 * it may be freed/reused already!
122 * (as soon as we release the req_lock) */
123 e_sector = e->sector;
124 do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
45bb912b 125 is_syncer_req = is_syncer_block_id(e->block_id);
b411b363 126
127 spin_lock_irqsave(&mdev->req_lock, flags);
128 mdev->writ_cnt += e->size >> 9;
129 list_del(&e->w.list); /* has been on active_ee or sync_ee */
130 list_add_tail(&e->w.list, &mdev->done_ee);
131
132 /* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
133 * neither did we wake possibly waiting conflicting requests.
134 * done from "drbd_process_done_ee" within the appropriate w.cb
135 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */
136
137 do_wake = is_syncer_req
138 ? list_empty(&mdev->sync_ee)
139 : list_empty(&mdev->active_ee);
140
45bb912b 141 if (test_bit(__EE_WAS_ERROR, &e->flags))
81e84650 142 __drbd_chk_io_error(mdev, false);
143 spin_unlock_irqrestore(&mdev->req_lock, flags);
144
145 if (is_syncer_req)
146 drbd_rs_complete_io(mdev, e_sector);
147
148 if (do_wake)
149 wake_up(&mdev->ee_wait);
150
151 if (do_al_complete_io)
152 drbd_al_complete_io(mdev, e_sector);
153
154 wake_asender(mdev);
155 put_ldev(mdev);
45bb912b 156}
b411b363 157
158/* writes on behalf of the partner, or resync writes,
159 * "submitted" by the receiver.
160 */
161void drbd_endio_sec(struct bio *bio, int error)
162{
163 struct drbd_epoch_entry *e = bio->bi_private;
164 struct drbd_conf *mdev = e->mdev;
165 int uptodate = bio_flagged(bio, BIO_UPTODATE);
166 int is_write = bio_data_dir(bio) == WRITE;
167
07194272 168 if (error && __ratelimit(&drbd_ratelimit_state))
169 dev_warn(DEV, "%s: error=%d s=%llus\n",
170 is_write ? "write" : "read", error,
171 (unsigned long long)e->sector);
172 if (!error && !uptodate) {
173 if (__ratelimit(&drbd_ratelimit_state))
174 dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
175 is_write ? "write" : "read",
176 (unsigned long long)e->sector);
177 /* strange behavior of some lower level drivers...
178 * fail the request by clearing the uptodate flag,
179 * but do not return any error?! */
180 error = -EIO;
181 }
182
183 if (error)
184 set_bit(__EE_WAS_ERROR, &e->flags);
185
186 bio_put(bio); /* no need for the bio anymore */
187 if (atomic_dec_and_test(&e->pending_bios)) {
188 if (is_write)
189 drbd_endio_write_sec_final(e);
190 else
191 drbd_endio_read_sec_final(e);
192 }
193}
194
195/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
196 */
197void drbd_endio_pri(struct bio *bio, int error)
198{
a115413d 199 unsigned long flags;
200 struct drbd_request *req = bio->bi_private;
201 struct drbd_conf *mdev = req->mdev;
a115413d 202 struct bio_and_error m;
203 enum drbd_req_event what;
204 int uptodate = bio_flagged(bio, BIO_UPTODATE);
205
206 if (!error && !uptodate) {
207 dev_warn(DEV, "p %s: setting error to -EIO\n",
208 bio_data_dir(bio) == WRITE ? "write" : "read");
209 /* strange behavior of some lower level drivers...
210 * fail the request by clearing the uptodate flag,
211 * but do not return any error?! */
212 error = -EIO;
213 }
214
215 /* to avoid recursion in __req_mod */
216 if (unlikely(error)) {
217 what = (bio_data_dir(bio) == WRITE)
218 ? write_completed_with_error
5c3c7e64 219 : (bio_rw(bio) == READ)
220 ? read_completed_with_error
221 : read_ahead_completed_with_error;
222 } else
223 what = completed_ok;
224
225 bio_put(req->private_bio);
226 req->private_bio = ERR_PTR(error);
227
228 /* not req_mod(), we need irqsave here! */
229 spin_lock_irqsave(&mdev->req_lock, flags);
230 __req_mod(req, what, &m);
231 spin_unlock_irqrestore(&mdev->req_lock, flags);
232
233 if (m.bio)
234 complete_master_bio(mdev, &m);
235}
236
237int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
238{
239 struct drbd_request *req = container_of(w, struct drbd_request, w);
240
241 /* We should not detach for read io-error,
242 * but try to WRITE the P_DATA_REPLY to the failed location,
243 * to give the disk the chance to relocate that block */
244
245 spin_lock_irq(&mdev->req_lock);
246 if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
247 _req_mod(req, read_retry_remote_canceled);
b411b363 248 spin_unlock_irq(&mdev->req_lock);
249 return 1;
250 }
251 spin_unlock_irq(&mdev->req_lock);
252
253 return w_send_read_req(mdev, w, 0);
254}
255
256int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
257{
258 ERR_IF(cancel) return 1;
259 dev_err(DEV, "resync inactive, but callback triggered??\n");
260 return 1; /* Simply ignore this! */
261}
262
263void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
264{
265 struct hash_desc desc;
266 struct scatterlist sg;
267 struct page *page = e->pages;
268 struct page *tmp;
269 unsigned len;
270
271 desc.tfm = tfm;
272 desc.flags = 0;
273
274 sg_init_table(&sg, 1);
275 crypto_hash_init(&desc);
276
277 while ((tmp = page_chain_next(page))) {
278 /* all but the last page will be fully used */
279 sg_set_page(&sg, page, PAGE_SIZE, 0);
280 crypto_hash_update(&desc, &sg, sg.length);
281 page = tmp;
282 }
283 /* and now the last, possibly only partially used page */
284 len = e->size & (PAGE_SIZE - 1);
285 sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
286 crypto_hash_update(&desc, &sg, sg.length);
287 crypto_hash_final(&desc, digest);
288}
289
290void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
291{
292 struct hash_desc desc;
293 struct scatterlist sg;
294 struct bio_vec *bvec;
295 int i;
296
297 desc.tfm = tfm;
298 desc.flags = 0;
299
300 sg_init_table(&sg, 1);
301 crypto_hash_init(&desc);
302
303 __bio_for_each_segment(bvec, bio, i, 0) {
304 sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
305 crypto_hash_update(&desc, &sg, sg.length);
306 }
307 crypto_hash_final(&desc, digest);
308}
309
310static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
311{
312 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
313 int digest_size;
314 void *digest;
315 int ok;
316
317 D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);
318
319 if (unlikely(cancel)) {
320 drbd_free_ee(mdev, e);
321 return 1;
322 }
323
45bb912b 324 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
325 digest_size = crypto_hash_digestsize(mdev->csums_tfm);
326 digest = kmalloc(digest_size, GFP_NOIO);
327 if (digest) {
45bb912b 328 drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
329
330 inc_rs_pending(mdev);
331 ok = drbd_send_drequest_csum(mdev,
332 e->sector,
333 e->size,
334 digest,
335 digest_size,
336 P_CSUM_RS_REQUEST);
337 kfree(digest);
338 } else {
339 dev_err(DEV, "kmalloc() of digest failed.\n");
340 ok = 0;
341 }
342 } else
343 ok = 1;
344
345 drbd_free_ee(mdev, e);
346
347 if (unlikely(!ok))
348 dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
349 return ok;
350}
351
352#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
353
354static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
355{
356 struct drbd_epoch_entry *e;
357
358 if (!get_ldev(mdev))
80a40e43 359 return -EIO;
b411b363 360
e3555d85 361 if (drbd_rs_should_slow_down(mdev, sector))
362 goto defer;
363
364 /* GFP_TRY, because if there is no memory available right now, this may
365 * be rescheduled for later. It is "only" background resync, after all. */
366 e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
45bb912b 367 if (!e)
80a40e43 368 goto defer;
b411b363 369
80a40e43 370 e->w.cb = w_e_send_csum;
371 spin_lock_irq(&mdev->req_lock);
372 list_add(&e->w.list, &mdev->read_ee);
373 spin_unlock_irq(&mdev->req_lock);
374
0f0601f4 375 atomic_add(size >> 9, &mdev->rs_sect_ev);
45bb912b 376 if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
80a40e43 377 return 0;
b411b363 378
379 /* drbd_submit_ee currently fails for one reason only:
380 * not being able to allocate enough bios.
381 * Is dropping the connection going to help? */
382 spin_lock_irq(&mdev->req_lock);
383 list_del(&e->w.list);
384 spin_unlock_irq(&mdev->req_lock);
385
45bb912b 386 drbd_free_ee(mdev, e);
80a40e43 387defer:
45bb912b 388 put_ldev(mdev);
80a40e43 389 return -EAGAIN;
390}
391
392void resync_timer_fn(unsigned long data)
393{
394 struct drbd_conf *mdev = (struct drbd_conf *) data;
395 int queue;
396
397 queue = 1;
398 switch (mdev->state.conn) {
399 case C_VERIFY_S:
400 mdev->resync_work.cb = w_make_ov_request;
401 break;
402 case C_SYNC_TARGET:
403 mdev->resync_work.cb = w_make_resync_request;
404 break;
405 default:
406 queue = 0;
407 mdev->resync_work.cb = w_resync_inactive;
408 }
409
410 /* harmless race: list_empty outside data.work.q_lock */
411 if (list_empty(&mdev->resync_work.list) && queue)
412 drbd_queue_work(&mdev->data.work, &mdev->resync_work);
413}
414
415static void fifo_set(struct fifo_buffer *fb, int value)
416{
417 int i;
418
419 for (i = 0; i < fb->size; i++)
f10f2623 420 fb->values[i] = value;
421}
422
423static int fifo_push(struct fifo_buffer *fb, int value)
424{
425 int ov;
426
427 ov = fb->values[fb->head_index];
428 fb->values[fb->head_index++] = value;
429
430 if (fb->head_index >= fb->size)
431 fb->head_index = 0;
432
433 return ov;
434}
435
436static void fifo_add_val(struct fifo_buffer *fb, int value)
437{
438 int i;
439
440 for (i = 0; i < fb->size; i++)
441 fb->values[i] += value;
442}
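/* Illustrative note (editor's sketch, not part of the original DRBD source):
 * with fb->size == 4, values {5, 0, 0, 0} and head_index == 0,
 * fifo_push(fb, 7) returns the old head value 5, stores 7 in its slot and
 * advances head_index to 1.  Together with fifo_add_val(), which spreads a
 * correction evenly over all slots, this implements the fixed-size
 * "plan ahead" ring used by drbd_rs_controller() below. */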
443
9d77a5fe 444static int drbd_rs_controller(struct drbd_conf *mdev)
445{
446 unsigned int sect_in; /* Number of sectors that came in since the last turn */
447 unsigned int want; /* The number of sectors we want in the proxy */
448 int req_sect; /* Number of sectors to request in this turn */
449 int correction; /* Number of sectors more we need in the proxy*/
450 int cps; /* correction per invocation of drbd_rs_controller() */
451 int steps; /* Number of time steps to plan ahead */
452 int curr_corr;
453 int max_sect;
454
455 sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
456 mdev->rs_in_flight -= sect_in;
457
458 spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */
459
460 steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */
461
462 if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
463 want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
464 } else { /* normal path */
465 want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
466 sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
467 }
468
469 correction = want - mdev->rs_in_flight - mdev->rs_planed;
470
471 /* Plan ahead */
472 cps = correction / steps;
473 fifo_add_val(&mdev->rs_plan_s, cps);
474 mdev->rs_planed += cps * steps;
475
476 /* What we do in this step */
477 curr_corr = fifo_push(&mdev->rs_plan_s, 0);
478 spin_unlock(&mdev->peer_seq_lock);
479 mdev->rs_planed -= curr_corr;
480
481 req_sect = sect_in + curr_corr;
482 if (req_sect < 0)
483 req_sect = 0;
484
485 max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
486 if (req_sect > max_sect)
487 req_sect = max_sect;
488
489 /*
490 dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
491 sect_in, mdev->rs_in_flight, want, correction,
492 steps, cps, mdev->rs_planed, curr_corr, req_sect);
493 */
494
495 return req_sect;
496}
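/* Worked example (editor's sketch, not part of the original DRBD source;
 * assumes the usual SLEEP_TIME of HZ/10, i.e. 100 ms per controller step):
 * with steps == 20, c_fill_target == 0, c_delay_target == 10 (one second),
 * sect_in == 2000 sectors acknowledged during the last step,
 * rs_in_flight == 18000 and rs_planed == 1000:
 *   want       = 2000 * 10 * HZ / ((HZ/10) * 10) = 20000 sectors
 *   correction = 20000 - 18000 - 1000            = 1000 sectors
 *   cps        = 1000 / 20                       = 50 sectors per step
 *   req_sect   = sect_in + curr_corr, clamped to c_max_rate * 2 * SLEEP_TIME / HZ
 * so the amount requested follows the measured drain rate plus a small
 * correction that is spread over the plan-ahead fifo. */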
497
9d77a5fe 498static int drbd_rs_number_requests(struct drbd_conf *mdev)
499{
500 int number;
501 if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
502 number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
503 mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
504 } else {
505 mdev->c_sync_rate = mdev->sync_conf.rate;
506 number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
507 }
508
509 /* ignore the amount of pending requests, the resync controller should
510 * throttle down to incoming reply rate soon enough anyways. */
511 return number;
512}
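/* Unit note (editor's sketch, not part of the original DRBD source):
 * the controller above works in 512-byte sectors, while resync requests are
 * issued per bitmap block; with the usual 4 KiB BM_BLOCK_SIZE, the shift by
 * (BM_BLOCK_SHIFT - 9) divides by 8, and mdev->c_sync_rate stores the
 * equivalent rate in KiB per second. */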
513
514static int w_make_resync_request(struct drbd_conf *mdev,
515 struct drbd_work *w, int cancel)
516{
517 unsigned long bit;
518 sector_t sector;
519 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1816a2b4 520 int max_bio_size;
e65f440d 521 int number, rollback_i, size;
b411b363 522 int align, queued, sndbuf;
0f0601f4 523 int i = 0;
524
525 if (unlikely(cancel))
526 return 1;
527
528 if (unlikely(mdev->state.conn < C_CONNECTED)) {
529 dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");
530 return 0;
531 }
532
533 if (mdev->state.conn != C_SYNC_TARGET)
534 dev_err(DEV, "%s in w_make_resync_request\n",
535 drbd_conn_str(mdev->state.conn));
536
537 if (mdev->rs_total == 0) {
538 /* empty resync? */
539 drbd_resync_finished(mdev);
540 return 1;
541 }
542
543 if (!get_ldev(mdev)) {
544 /* Since we only need to access mdev->rsync a
545 get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
546 to continue resync with a broken disk makes no sense at
547 all */
548 dev_err(DEV, "Disk broke down during resync!\n");
549 mdev->resync_work.cb = w_resync_inactive;
550 return 1;
551 }
552
553 /* starting with drbd 8.3.8, we can handle multi-bio EEs,
554 * if it should be necessary */
555 max_bio_size =
556 mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
557 mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
bb3d000c 558
559 number = drbd_rs_number_requests(mdev);
560 if (number == 0)
0f0601f4 561 goto requeue;
b411b363 562
563 for (i = 0; i < number; i++) {
564 /* Stop generating RS requests, when half of the send buffer is filled */
565 mutex_lock(&mdev->data.mutex);
566 if (mdev->data.socket) {
567 queued = mdev->data.socket->sk->sk_wmem_queued;
568 sndbuf = mdev->data.socket->sk->sk_sndbuf;
569 } else {
570 queued = 1;
571 sndbuf = 0;
572 }
573 mutex_unlock(&mdev->data.mutex);
574 if (queued > sndbuf / 2)
575 goto requeue;
576
577next_sector:
578 size = BM_BLOCK_SIZE;
579 bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo);
580
4b0715f0 581 if (bit == DRBD_END_OF_BITMAP) {
582 mdev->bm_resync_fo = drbd_bm_bits(mdev);
583 mdev->resync_work.cb = w_resync_inactive;
584 put_ldev(mdev);
585 return 1;
586 }
587
588 sector = BM_BIT_TO_SECT(bit);
589
590 if (drbd_rs_should_slow_down(mdev, sector) ||
591 drbd_try_rs_begin_io(mdev, sector)) {
592 mdev->bm_resync_fo = bit;
593 goto requeue;
594 }
595 mdev->bm_resync_fo = bit + 1;
596
597 if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
598 drbd_rs_complete_io(mdev, sector);
599 goto next_sector;
600 }
601
1816a2b4 602#if DRBD_MAX_BIO_SIZE > BM_BLOCK_SIZE
603 /* try to find some adjacent bits.
604 * we stop if we have already the maximum req size.
605 *
606 * Additionally always align bigger requests, in order to
607 * be prepared for all stripe sizes of software RAIDs.
608 */
609 align = 1;
d207450c 610 rollback_i = i;
b411b363 611 for (;;) {
1816a2b4 612 if (size + BM_BLOCK_SIZE > max_bio_size)
613 break;
614
615 /* Be always aligned */
616 if (sector & ((1<<(align+3))-1))
617 break;
618
619 /* do not cross extent boundaries */
620 if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
621 break;
622 /* now, is it actually dirty, after all?
623 * caution, drbd_bm_test_bit is tri-state for some
624 * obscure reason; ( b == 0 ) would get the out-of-band
625 * only accidentally right because of the "oddly sized"
626 * adjustment below */
627 if (drbd_bm_test_bit(mdev, bit+1) != 1)
628 break;
629 bit++;
630 size += BM_BLOCK_SIZE;
631 if ((BM_BLOCK_SIZE << align) <= size)
632 align++;
633 i++;
634 }
635 /* if we merged some,
636 * reset the offset to start the next drbd_bm_find_next from */
637 if (size > BM_BLOCK_SIZE)
638 mdev->bm_resync_fo = bit + 1;
639#endif
640
641 /* adjust very last sectors, in case we are oddly sized */
642 if (sector + (size>>9) > capacity)
643 size = (capacity-sector)<<9;
644 if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
645 switch (read_for_csum(mdev, sector, size)) {
80a40e43 646 case -EIO: /* Disk failure */
647 put_ldev(mdev);
648 return 0;
80a40e43 649 case -EAGAIN: /* allocation failed, or ldev busy */
650 drbd_rs_complete_io(mdev, sector);
651 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
d207450c 652 i = rollback_i;
b411b363 653 goto requeue;
654 case 0:
655 /* everything ok */
656 break;
657 default:
658 BUG();
659 }
660 } else {
661 inc_rs_pending(mdev);
662 if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
663 sector, size, ID_SYNCER)) {
664 dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
665 dec_rs_pending(mdev);
666 put_ldev(mdev);
667 return 0;
668 }
669 }
670 }
671
672 if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
673 /* last syncer _request_ was sent,
674 * but the P_RS_DATA_REPLY not yet received. sync will end (and
675 * next sync group will resume), as soon as we receive the last
676 * resync data block, and the last bit is cleared.
677 * until then resync "work" is "inactive" ...
678 */
679 mdev->resync_work.cb = w_resync_inactive;
680 put_ldev(mdev);
681 return 1;
682 }
683
684 requeue:
778f271d 685 mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
686 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
687 put_ldev(mdev);
688 return 1;
689}
690
691static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
692{
693 int number, i, size;
694 sector_t sector;
695 const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
696
697 if (unlikely(cancel))
698 return 1;
699
700 if (unlikely(mdev->state.conn < C_CONNECTED)) {
701 dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");
702 return 0;
703 }
704
2649f080 705 number = drbd_rs_number_requests(mdev);
706
707 sector = mdev->ov_position;
708 for (i = 0; i < number; i++) {
709 if (sector >= capacity) {
710 mdev->resync_work.cb = w_resync_inactive;
711 return 1;
712 }
713
714 size = BM_BLOCK_SIZE;
715
716 if (drbd_rs_should_slow_down(mdev, sector) ||
717 drbd_try_rs_begin_io(mdev, sector)) {
718 mdev->ov_position = sector;
719 goto requeue;
720 }
721
722 if (sector + (size>>9) > capacity)
723 size = (capacity-sector)<<9;
724
725 inc_rs_pending(mdev);
726 if (!drbd_send_ov_request(mdev, sector, size)) {
727 dec_rs_pending(mdev);
728 return 0;
729 }
730 sector += BM_SECT_PER_BIT;
731 }
732 mdev->ov_position = sector;
733
734 requeue:
2649f080 735 mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
736 mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
737 return 1;
738}
739
740
741int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
742{
743 drbd_start_resync(mdev, C_SYNC_SOURCE);
744
745 return 1;
746}
747
748int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
749{
750 kfree(w);
751 ov_oos_print(mdev);
752 drbd_resync_finished(mdev);
753
754 return 1;
755}
756
757static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
758{
759 kfree(w);
760
761 drbd_resync_finished(mdev);
762
763 return 1;
764}
765
766static void ping_peer(struct drbd_conf *mdev)
767{
768 clear_bit(GOT_PING_ACK, &mdev->flags);
769 request_ping(mdev);
770 wait_event(mdev->misc_wait,
771 test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
772}
773
774int drbd_resync_finished(struct drbd_conf *mdev)
775{
776 unsigned long db, dt, dbdt;
777 unsigned long n_oos;
778 union drbd_state os, ns;
779 struct drbd_work *w;
780 char *khelper_cmd = NULL;
26525618 781 int verify_done = 0;
782
783 /* Remove all elements from the resync LRU. Since future actions
784 * might set bits in the (main) bitmap, then the entries in the
785 * resync LRU would be wrong. */
786 if (drbd_rs_del_all(mdev)) {
787 /* In case this is not possible now, most probably because
788 * there are P_RS_DATA_REPLY Packets lingering on the worker's
789 * queue (or even the read operations for those packets
790 * is not finished by now). Retry in 100ms. */
791
792 __set_current_state(TASK_INTERRUPTIBLE);
793 schedule_timeout(HZ / 10);
794 w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
795 if (w) {
796 w->cb = w_resync_finished;
797 drbd_queue_work(&mdev->data.work, w);
798 return 1;
799 }
800 dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
801 }
802
803 dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
804 if (dt <= 0)
805 dt = 1;
806 db = mdev->rs_total;
807 dbdt = Bit2KB(db/dt);
808 mdev->rs_paused /= HZ;
809
810 if (!get_ldev(mdev))
811 goto out;
812
813 ping_peer(mdev);
814
815 spin_lock_irq(&mdev->req_lock);
816 os = mdev->state;
817
818 verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
819
820 /* This protects us against multiple calls (that can happen in the presence
821 of application IO), and against connectivity loss just before we arrive here. */
822 if (os.conn <= C_CONNECTED)
823 goto out_unlock;
824
825 ns = os;
826 ns.conn = C_CONNECTED;
827
828 dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
26525618 829 verify_done ? "Online verify " : "Resync",
830 dt + mdev->rs_paused, mdev->rs_paused, dbdt);
831
832 n_oos = drbd_bm_total_weight(mdev);
833
834 if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
835 if (n_oos) {
836 dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
837 n_oos, Bit2KB(1));
838 khelper_cmd = "out-of-sync";
839 }
840 } else {
841 D_ASSERT((n_oos - mdev->rs_failed) == 0);
842
843 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
844 khelper_cmd = "after-resync-target";
845
846 if (mdev->csums_tfm && mdev->rs_total) {
847 const unsigned long s = mdev->rs_same_csum;
848 const unsigned long t = mdev->rs_total;
849 const int ratio =
850 (t == 0) ? 0 :
851 (t < 100000) ? ((s*100)/t) : (s/(t/100));
852 dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; "
853 "transferred %luK total %luK\n",
854 ratio,
855 Bit2KB(mdev->rs_same_csum),
856 Bit2KB(mdev->rs_total - mdev->rs_same_csum),
857 Bit2KB(mdev->rs_total));
858 }
859 }
860
861 if (mdev->rs_failed) {
862 dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);
863
864 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
865 ns.disk = D_INCONSISTENT;
866 ns.pdsk = D_UP_TO_DATE;
867 } else {
868 ns.disk = D_UP_TO_DATE;
869 ns.pdsk = D_INCONSISTENT;
870 }
871 } else {
872 ns.disk = D_UP_TO_DATE;
873 ns.pdsk = D_UP_TO_DATE;
874
875 if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
876 if (mdev->p_uuid) {
877 int i;
878 for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
879 _drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
880 drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
881 _drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
882 } else {
883 dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
884 }
885 }
886
887 drbd_uuid_set_bm(mdev, 0UL);
888
889 if (mdev->p_uuid) {
890 /* Now the two UUID sets are equal, update what we
891 * know of the peer. */
892 int i;
893 for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
894 mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
895 }
896 }
897
898 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
899out_unlock:
900 spin_unlock_irq(&mdev->req_lock);
901 put_ldev(mdev);
902out:
903 mdev->rs_total = 0;
904 mdev->rs_failed = 0;
905 mdev->rs_paused = 0;
906 if (verify_done)
907 mdev->ov_start_sector = 0;
b411b363 908
909 drbd_md_sync(mdev);
910
911 if (khelper_cmd)
912 drbd_khelper(mdev, khelper_cmd);
913
914 return 1;
915}
916
917/* helper */
918static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
919{
45bb912b 920 if (drbd_ee_has_active_page(e)) {
b411b363 921 /* This might happen if sendpage() has not finished */
78db8928 922 int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
923 atomic_add(i, &mdev->pp_in_use_by_net);
924 atomic_sub(i, &mdev->pp_in_use);
925 spin_lock_irq(&mdev->req_lock);
926 list_add_tail(&e->w.list, &mdev->net_ee);
927 spin_unlock_irq(&mdev->req_lock);
435f0740 928 wake_up(&drbd_pp_wait);
929 } else
930 drbd_free_ee(mdev, e);
931}
932
933/**
934 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
935 * @mdev: DRBD device.
936 * @w: work object.
937 * @cancel: The connection will be closed anyways
938 */
939int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
940{
941 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
942 int ok;
943
944 if (unlikely(cancel)) {
945 drbd_free_ee(mdev, e);
946 dec_unacked(mdev);
947 return 1;
948 }
949
45bb912b 950 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
951 ok = drbd_send_block(mdev, P_DATA_REPLY, e);
952 } else {
953 if (__ratelimit(&drbd_ratelimit_state))
954 dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
955 (unsigned long long)e->sector);
956
957 ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
958 }
959
960 dec_unacked(mdev);
961
962 move_to_net_ee_or_free(mdev, e);
963
964 if (unlikely(!ok))
965 dev_err(DEV, "drbd_send_block() failed\n");
966 return ok;
967}
968
969/**
970 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUESTRS
971 * @mdev: DRBD device.
972 * @w: work object.
973 * @cancel: The connection will be closed anyways
974 */
975int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
976{
977 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
978 int ok;
979
980 if (unlikely(cancel)) {
981 drbd_free_ee(mdev, e);
982 dec_unacked(mdev);
983 return 1;
984 }
985
986 if (get_ldev_if_state(mdev, D_FAILED)) {
987 drbd_rs_complete_io(mdev, e->sector);
988 put_ldev(mdev);
989 }
990
45bb912b 991 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
992 if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
993 inc_rs_pending(mdev);
994 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
995 } else {
996 if (__ratelimit(&drbd_ratelimit_state))
997 dev_err(DEV, "Not sending RSDataReply, "
998 "partner DISKLESS!\n");
999 ok = 1;
1000 }
1001 } else {
1002 if (__ratelimit(&drbd_ratelimit_state))
1003 dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
1004 (unsigned long long)e->sector);
1005
1006 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1007
1008 /* update resync data with failure */
1009 drbd_rs_failed_io(mdev, e->sector, e->size);
1010 }
1011
1012 dec_unacked(mdev);
1013
1014 move_to_net_ee_or_free(mdev, e);
1015
1016 if (unlikely(!ok))
1017 dev_err(DEV, "drbd_send_block() failed\n");
1018 return ok;
1019}
1020
1021int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1022{
1023 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1024 struct digest_info *di;
1025 int digest_size;
1026 void *digest = NULL;
1027 int ok, eq = 0;
1028
1029 if (unlikely(cancel)) {
1030 drbd_free_ee(mdev, e);
1031 dec_unacked(mdev);
1032 return 1;
1033 }
1034
1035 if (get_ldev(mdev)) {
1036 drbd_rs_complete_io(mdev, e->sector);
1037 put_ldev(mdev);
1038 }
b411b363 1039
85719573 1040 di = e->digest;
b411b363 1041
45bb912b 1042 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1043 /* quick hack to try to avoid a race against reconfiguration.
1044 * a real fix would be much more involved,
1045 * introducing more locking mechanisms */
1046 if (mdev->csums_tfm) {
1047 digest_size = crypto_hash_digestsize(mdev->csums_tfm);
1048 D_ASSERT(digest_size == di->digest_size);
1049 digest = kmalloc(digest_size, GFP_NOIO);
1050 }
1051 if (digest) {
45bb912b 1052 drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
1053 eq = !memcmp(digest, di->digest, digest_size);
1054 kfree(digest);
1055 }
1056
1057 if (eq) {
1058 drbd_set_in_sync(mdev, e->sector, e->size);
1059 /* rs_same_csums unit is BM_BLOCK_SIZE */
1060 mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
1061 ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
1062 } else {
1063 inc_rs_pending(mdev);
1064 e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
1065 e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
1066 kfree(di);
1067 ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
1068 }
1069 } else {
1070 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1071 if (__ratelimit(&drbd_ratelimit_state))
1072 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1073 }
1074
1075 dec_unacked(mdev);
1076 move_to_net_ee_or_free(mdev, e);
1077
1078 if (unlikely(!ok))
1079 dev_err(DEV, "drbd_send_block/ack() failed\n");
1080 return ok;
1081}
1082
1083int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1084{
1085 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1086 int digest_size;
1087 void *digest;
1088 int ok = 1;
1089
1090 if (unlikely(cancel))
1091 goto out;
1092
45bb912b 1093 if (unlikely((e->flags & EE_WAS_ERROR) != 0))
1094 goto out;
1095
1096 digest_size = crypto_hash_digestsize(mdev->verify_tfm);
1097 /* FIXME if this allocation fails, online verify will not terminate! */
1098 digest = kmalloc(digest_size, GFP_NOIO);
1099 if (digest) {
45bb912b 1100 drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
1101 inc_rs_pending(mdev);
1102 ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
1103 digest, digest_size, P_OV_REPLY);
1104 if (!ok)
1105 dec_rs_pending(mdev);
1106 kfree(digest);
1107 }
1108
1109out:
1110 drbd_free_ee(mdev, e);
1111
1112 dec_unacked(mdev);
1113
1114 return ok;
1115}
1116
1117void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
1118{
1119 if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
1120 mdev->ov_last_oos_size += size>>9;
1121 } else {
1122 mdev->ov_last_oos_start = sector;
1123 mdev->ov_last_oos_size = size>>9;
1124 }
1125 drbd_set_out_of_sync(mdev, sector, size);
1126}
1127
1128int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1129{
1130 struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
1131 struct digest_info *di;
1132 int digest_size;
1133 void *digest;
1134 int ok, eq = 0;
1135
1136 if (unlikely(cancel)) {
1137 drbd_free_ee(mdev, e);
1138 dec_unacked(mdev);
1139 return 1;
1140 }
1141
1142 /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
1143 * the resync lru has been cleaned up already */
1144 if (get_ldev(mdev)) {
1145 drbd_rs_complete_io(mdev, e->sector);
1146 put_ldev(mdev);
1147 }
b411b363 1148
85719573 1149 di = e->digest;
b411b363 1150
45bb912b 1151 if (likely((e->flags & EE_WAS_ERROR) == 0)) {
1152 digest_size = crypto_hash_digestsize(mdev->verify_tfm);
1153 digest = kmalloc(digest_size, GFP_NOIO);
1154 if (digest) {
45bb912b 1155 drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
1156
1157 D_ASSERT(digest_size == di->digest_size);
1158 eq = !memcmp(digest, di->digest, digest_size);
1159 kfree(digest);
1160 }
1161 } else {
1162 ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
1163 if (__ratelimit(&drbd_ratelimit_state))
1164 dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
1165 }
1166
1167 dec_unacked(mdev);
1168 if (!eq)
1169 drbd_ov_oos_found(mdev, e->sector, e->size);
1170 else
1171 ov_oos_print(mdev);
1172
1173 ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size,
1174 eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);
1175
1176 drbd_free_ee(mdev, e);
1177
1178 --mdev->ov_left;
1179
1180 /* let's advance progress step marks only for every other megabyte */
1181 if ((mdev->ov_left & 0x200) == 0x200)
1182 drbd_advance_rs_marks(mdev, mdev->ov_left);
1183
1184 if (mdev->ov_left == 0) {
1185 ov_oos_print(mdev);
1186 drbd_resync_finished(mdev);
1187 }
1188
1189 return ok;
1190}
1191
1192int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1193{
1194 struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
1195 complete(&b->done);
1196 return 1;
1197}
1198
1199int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1200{
1201 struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
1202 struct p_barrier *p = &mdev->data.sbuf.barrier;
1203 int ok = 1;
1204
1205 /* really avoid racing with tl_clear. w.cb may have been referenced
1206 * just before it was reassigned and re-queued, so double check that.
1207 * actually, this race was harmless, since we only try to send the
1208 * barrier packet here, and otherwise do nothing with the object.
1209 * but compare with the head of w_clear_epoch */
1210 spin_lock_irq(&mdev->req_lock);
1211 if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
1212 cancel = 1;
1213 spin_unlock_irq(&mdev->req_lock);
1214 if (cancel)
1215 return 1;
1216
1217 if (!drbd_get_data_sock(mdev))
1218 return 0;
1219 p->barrier = b->br_number;
1220 /* inc_ap_pending was done where this was queued.
1221 * dec_ap_pending will be done in got_BarrierAck
1222 * or (on connection loss) in w_clear_epoch. */
1223 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
0b70a13d 1224 (struct p_header80 *)p, sizeof(*p), 0);
1225 drbd_put_data_sock(mdev);
1226
1227 return ok;
1228}
1229
1230int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1231{
1232 if (cancel)
1233 return 1;
1234 return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
1235}
1236
1237int w_send_oos(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1238{
1239 struct drbd_request *req = container_of(w, struct drbd_request, w);
1240 int ok;
1241
1242 if (unlikely(cancel)) {
1243 req_mod(req, send_canceled);
1244 return 1;
1245 }
1246
1247 ok = drbd_send_oos(mdev, req);
1248 req_mod(req, oos_handed_to_network);
1249
1250 return ok;
1251}
1252
1253/**
1254 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
1255 * @mdev: DRBD device.
1256 * @w: work object.
1257 * @cancel: The connection will be closed anyways
1258 */
1259int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1260{
1261 struct drbd_request *req = container_of(w, struct drbd_request, w);
1262 int ok;
1263
1264 if (unlikely(cancel)) {
1265 req_mod(req, send_canceled);
1266 return 1;
1267 }
1268
1269 ok = drbd_send_dblock(mdev, req);
1270 req_mod(req, ok ? handed_over_to_network : send_failed);
1271
1272 return ok;
1273}
1274
1275/**
1276 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
1277 * @mdev: DRBD device.
1278 * @w: work object.
1279 * @cancel: The connection will be closed anyways
1280 */
1281int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1282{
1283 struct drbd_request *req = container_of(w, struct drbd_request, w);
1284 int ok;
1285
1286 if (unlikely(cancel)) {
1287 req_mod(req, send_canceled);
1288 return 1;
1289 }
1290
1291 ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
1292 (unsigned long)req);
1293
1294 if (!ok) {
1295 /* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
1296 * so this is probably redundant */
1297 if (mdev->state.conn >= C_CONNECTED)
1298 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
1299 }
1300 req_mod(req, ok ? handed_over_to_network : send_failed);
1301
1302 return ok;
1303}
1304
1305int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
1306{
1307 struct drbd_request *req = container_of(w, struct drbd_request, w);
1308
0778286a 1309 if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
1310 drbd_al_begin_io(mdev, req->sector);
1311 /* Calling drbd_al_begin_io() out of the worker might deadlocks
1312 theoretically. Practically it can not deadlock, since this is
1313 only used when unfreezing IOs. All the extents of the requests
1314 that made it into the TL are already active */
1315
1316 drbd_req_make_private_bio(req, req->master_bio);
1317 req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
1318 generic_make_request(req->private_bio);
1319
1320 return 1;
1321}
1322
1323static int _drbd_may_sync_now(struct drbd_conf *mdev)
1324{
1325 struct drbd_conf *odev = mdev;
1326
1327 while (1) {
1328 if (odev->sync_conf.after == -1)
1329 return 1;
1330 odev = minor_to_mdev(odev->sync_conf.after);
1331 ERR_IF(!odev) return 1;
1332 if ((odev->state.conn >= C_SYNC_SOURCE &&
1333 odev->state.conn <= C_PAUSED_SYNC_T) ||
1334 odev->state.aftr_isp || odev->state.peer_isp ||
1335 odev->state.user_isp)
1336 return 0;
1337 }
1338}
1339
1340/**
1341 * _drbd_pause_after() - Pause resync on all devices that may not resync now
1342 * @mdev: DRBD device.
1343 *
1344 * Called from process context only (admin command and after_state_ch).
1345 */
1346static int _drbd_pause_after(struct drbd_conf *mdev)
1347{
1348 struct drbd_conf *odev;
1349 int i, rv = 0;
1350
1351 for (i = 0; i < minor_count; i++) {
1352 odev = minor_to_mdev(i);
1353 if (!odev)
1354 continue;
1355 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1356 continue;
1357 if (!_drbd_may_sync_now(odev))
1358 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
1359 != SS_NOTHING_TO_DO);
1360 }
1361
1362 return rv;
1363}
1364
1365/**
1366 * _drbd_resume_next() - Resume resync on all devices that may resync now
1367 * @mdev: DRBD device.
1368 *
1369 * Called from process context only (admin command and worker).
1370 */
1371static int _drbd_resume_next(struct drbd_conf *mdev)
1372{
1373 struct drbd_conf *odev;
1374 int i, rv = 0;
1375
1376 for (i = 0; i < minor_count; i++) {
1377 odev = minor_to_mdev(i);
1378 if (!odev)
1379 continue;
1380 if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
1381 continue;
1382 if (odev->state.aftr_isp) {
1383 if (_drbd_may_sync_now(odev))
1384 rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
1385 CS_HARD, NULL)
1386 != SS_NOTHING_TO_DO) ;
1387 }
1388 }
1389 return rv;
1390}
1391
1392void resume_next_sg(struct drbd_conf *mdev)
1393{
1394 write_lock_irq(&global_state_lock);
1395 _drbd_resume_next(mdev);
1396 write_unlock_irq(&global_state_lock);
1397}
1398
1399void suspend_other_sg(struct drbd_conf *mdev)
1400{
1401 write_lock_irq(&global_state_lock);
1402 _drbd_pause_after(mdev);
1403 write_unlock_irq(&global_state_lock);
1404}
1405
1406static int sync_after_error(struct drbd_conf *mdev, int o_minor)
1407{
1408 struct drbd_conf *odev;
1409
1410 if (o_minor == -1)
1411 return NO_ERROR;
1412 if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
1413 return ERR_SYNC_AFTER;
1414
1415 /* check for loops */
1416 odev = minor_to_mdev(o_minor);
1417 while (1) {
1418 if (odev == mdev)
1419 return ERR_SYNC_AFTER_CYCLE;
1420
1421 /* dependency chain ends here, no cycles. */
1422 if (odev->sync_conf.after == -1)
1423 return NO_ERROR;
1424
1425 /* follow the dependency chain */
1426 odev = minor_to_mdev(odev->sync_conf.after);
1427 }
1428}
1429
1430int drbd_alter_sa(struct drbd_conf *mdev, int na)
1431{
1432 int changes;
1433 int retcode;
1434
1435 write_lock_irq(&global_state_lock);
1436 retcode = sync_after_error(mdev, na);
1437 if (retcode == NO_ERROR) {
1438 mdev->sync_conf.after = na;
1439 do {
1440 changes = _drbd_pause_after(mdev);
1441 changes |= _drbd_resume_next(mdev);
1442 } while (changes);
1443 }
1444 write_unlock_irq(&global_state_lock);
1445 return retcode;
1446}
1447
1448void drbd_rs_controller_reset(struct drbd_conf *mdev)
1449{
1450 atomic_set(&mdev->rs_sect_in, 0);
1451 atomic_set(&mdev->rs_sect_ev, 0);
1452 mdev->rs_in_flight = 0;
1453 mdev->rs_planed = 0;
1454 spin_lock(&mdev->peer_seq_lock);
1455 fifo_set(&mdev->rs_plan_s, 0);
1456 spin_unlock(&mdev->peer_seq_lock);
1457}
1458
1459/**
1460 * drbd_start_resync() - Start the resync process
1461 * @mdev: DRBD device.
1462 * @side: Either C_SYNC_SOURCE or C_SYNC_TARGET
1463 *
1464 * This function might bring you directly into one of the
1465 * C_PAUSED_SYNC_* states.
1466 */
1467void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
1468{
1469 union drbd_state ns;
1470 int r;
1471
c4752ef1 1472 if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn < C_AHEAD) {
1473 dev_err(DEV, "Resync already running!\n");
1474 return;
1475 }
1476
1477 if (mdev->state.conn < C_AHEAD) {
1478 /* In case a previous resync run was aborted by an IO error/detach on the peer. */
1479 drbd_rs_cancel_all(mdev);
1480 /* This should be done when we abort the resync. We definitely do not
1481 want to have this for connections going back and forth between
1482 Ahead/Behind and SyncSource/SyncTarget */
1483 }
1484
1485 if (side == C_SYNC_TARGET) {
1486 /* Since application IO was locked out during C_WF_BITMAP_T and
1487 C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
1488 we check that we might make the data inconsistent. */
1489 r = drbd_khelper(mdev, "before-resync-target");
1490 r = (r >> 8) & 0xff;
1491 if (r > 0) {
1492 dev_info(DEV, "before-resync-target handler returned %d, "
1493 "dropping connection.\n", r);
1494 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
1495 return;
1496 }
1497 } else /* C_SYNC_SOURCE */ {
1498 r = drbd_khelper(mdev, "before-resync-source");
1499 r = (r >> 8) & 0xff;
1500 if (r > 0) {
1501 if (r == 3) {
1502 dev_info(DEV, "before-resync-source handler returned %d, "
1503 "ignoring. Old userland tools?", r);
1504 } else {
1505 dev_info(DEV, "before-resync-source handler returned %d, "
1506 "dropping connection.\n", r);
1507 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
1508 return;
1509 }
1510 }
1511 }
1512
1513 drbd_state_lock(mdev);
1514
1515 if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
1516 drbd_state_unlock(mdev);
1517 return;
1518 }
1519
1520 write_lock_irq(&global_state_lock);
1521 ns = mdev->state;
1522
1523 ns.aftr_isp = !_drbd_may_sync_now(mdev);
1524
1525 ns.conn = side;
1526
1527 if (side == C_SYNC_TARGET)
1528 ns.disk = D_INCONSISTENT;
1529 else /* side == C_SYNC_SOURCE */
1530 ns.pdsk = D_INCONSISTENT;
1531
1532 r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1533 ns = mdev->state;
1534
1535 if (ns.conn < C_CONNECTED)
1536 r = SS_UNKNOWN_ERROR;
1537
1538 if (r == SS_SUCCESS) {
1539 unsigned long tw = drbd_bm_total_weight(mdev);
1540 unsigned long now = jiffies;
1541 int i;
1542
1543 mdev->rs_failed = 0;
1544 mdev->rs_paused = 0;
b411b363 1545 mdev->rs_same_csum = 0;
1546 mdev->rs_last_events = 0;
1547 mdev->rs_last_sect_ev = 0;
1548 mdev->rs_total = tw;
1549 mdev->rs_start = now;
1550 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1551 mdev->rs_mark_left[i] = tw;
1552 mdev->rs_mark_time[i] = now;
1553 }
1554 _drbd_pause_after(mdev);
1555 }
1556 write_unlock_irq(&global_state_lock);
1557
1558 if (side == C_SYNC_TARGET)
1559 mdev->bm_resync_fo = 0;
1560
1561 /* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
1562 * with w_send_oos, or the sync target will get confused as to
1563 * how much bits to resync. We cannot do that always, because for an
1564 * empty resync and protocol < 95, we need to do it here, as we call
1565 * drbd_resync_finished from here in that case.
1566 * We drbd_gen_and_send_sync_uuid here for protocol < 96,
1567 * and from after_state_ch otherwise. */
1568 if (side == C_SYNC_SOURCE && mdev->agreed_pro_version < 96)
1569 drbd_gen_and_send_sync_uuid(mdev);
1570
1571 if (r == SS_SUCCESS) {
1572 dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
1573 drbd_conn_str(ns.conn),
1574 (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
1575 (unsigned long) mdev->rs_total);
1576
1577 if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
1578 /* This still has a race (about when exactly the peers
1579 * detect connection loss) that can lead to a full sync
1580 * on next handshake. In 8.3.9 we fixed this with explicit
1581 * resync-finished notifications, but the fix
1582 * introduces a protocol change. Sleeping for some
1583 * time longer than the ping interval + timeout on the
1584 * SyncSource, to give the SyncTarget the chance to
1585 * detect connection loss, then waiting for a ping
1586 * response (implicit in drbd_resync_finished) reduces
1587 * the race considerably, but does not solve it. */
1588 if (side == C_SYNC_SOURCE)
1589 schedule_timeout_interruptible(
1590 mdev->net_conf->ping_int * HZ +
1591 mdev->net_conf->ping_timeo*HZ/9);
b411b363 1592 drbd_resync_finished(mdev);
1593 }
1594
9bd28d3c 1595 drbd_rs_controller_reset(mdev);
1596 /* ns.conn may already be != mdev->state.conn,
1597 * we may have been paused in between, or become paused until
1598 * the timer triggers.
1599 * No matter, that is handled in resync_timer_fn() */
1600 if (ns.conn == C_SYNC_TARGET)
1601 mod_timer(&mdev->resync_timer, jiffies);
1602
1603 drbd_md_sync(mdev);
1604 }
5a22db89 1605 put_ldev(mdev);
d0c3f60f 1606 drbd_state_unlock(mdev);
1607}
1608
1609int drbd_worker(struct drbd_thread *thi)
1610{
1611 struct drbd_conf *mdev = thi->mdev;
1612 struct drbd_work *w = NULL;
1613 LIST_HEAD(work_list);
1614 int intr = 0, i;
1615
1616 sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));
1617
1618 while (get_t_state(thi) == Running) {
1619 drbd_thread_current_set_cpu(mdev);
1620
1621 if (down_trylock(&mdev->data.work.s)) {
1622 mutex_lock(&mdev->data.mutex);
1623 if (mdev->data.socket && !mdev->net_conf->no_cork)
1624 drbd_tcp_uncork(mdev->data.socket);
1625 mutex_unlock(&mdev->data.mutex);
1626
1627 intr = down_interruptible(&mdev->data.work.s);
1628
1629 mutex_lock(&mdev->data.mutex);
1630 if (mdev->data.socket && !mdev->net_conf->no_cork)
1631 drbd_tcp_cork(mdev->data.socket);
1632 mutex_unlock(&mdev->data.mutex);
1633 }
1634
1635 if (intr) {
1636 D_ASSERT(intr == -EINTR);
1637 flush_signals(current);
1638 ERR_IF (get_t_state(thi) == Running)
1639 continue;
1640 break;
1641 }
1642
1643 if (get_t_state(thi) != Running)
1644 break;
1645 /* With this break, we have done a down() but not consumed
1646 the entry from the list. The cleanup code takes care of
1647 this... */
1648
1649 w = NULL;
1650 spin_lock_irq(&mdev->data.work.q_lock);
1651 ERR_IF(list_empty(&mdev->data.work.q)) {
1652 /* something terribly wrong in our logic.
1653 * we were able to down() the semaphore,
1654 * but the list is empty... doh.
1655 *
1656 * what is the best thing to do now?
1657 * try again from scratch, restarting the receiver,
1658 * asender, whatnot? could break even more ugly,
1659 * e.g. when we are primary, but no good local data.
1660 *
1661 * I'll try to get away just starting over this loop.
1662 */
1663 spin_unlock_irq(&mdev->data.work.q_lock);
1664 continue;
1665 }
1666 w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
1667 list_del_init(&w->list);
1668 spin_unlock_irq(&mdev->data.work.q_lock);
1669
1670 if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
1671 /* dev_warn(DEV, "worker: a callback failed! \n"); */
1672 if (mdev->state.conn >= C_CONNECTED)
1673 drbd_force_state(mdev,
1674 NS(conn, C_NETWORK_FAILURE));
1675 }
1676 }
1677 D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
1678 D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
1679
1680 spin_lock_irq(&mdev->data.work.q_lock);
1681 i = 0;
1682 while (!list_empty(&mdev->data.work.q)) {
1683 list_splice_init(&mdev->data.work.q, &work_list);
1684 spin_unlock_irq(&mdev->data.work.q_lock);
1685
1686 while (!list_empty(&work_list)) {
1687 w = list_entry(work_list.next, struct drbd_work, list);
1688 list_del_init(&w->list);
1689 w->cb(mdev, w, 1);
1690 i++; /* dead debugging code */
1691 }
1692
1693 spin_lock_irq(&mdev->data.work.q_lock);
1694 }
1695 sema_init(&mdev->data.work.s, 0);
1696 /* DANGEROUS race: if someone did queue his work within the spinlock,
1697 * but up() ed outside the spinlock, we could get an up() on the
1698 * semaphore without corresponding list entry.
1699 * So don't do that.
1700 */
1701 spin_unlock_irq(&mdev->data.work.q_lock);
1702
1703 D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
1704 /* _drbd_set_state only uses stop_nowait.
1705 * wait here for the Exiting receiver. */
1706 drbd_thread_stop(&mdev->receiver);
1707 drbd_mdev_cleanup(mdev);
1708
1709 dev_info(DEV, "worker terminated\n");
1710
1711 clear_bit(DEVICE_DYING, &mdev->flags);
1712 clear_bit(CONFIG_PENDING, &mdev->flags);
1713 wake_up(&mdev->state_wait);
1714
1715 return 0;
1716}