dm raid1: hold write bios when errors are handled
drivers/md/dm-raid1.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64
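
/*
 * The optional "handle_errors" feature makes the target record failing
 * legs and notify userspace instead of silently ignoring write errors;
 * writes that cannot complete cleanly are then held on ms->holds until
 * the mirror is reconfigured (see hold_bio() and do_failures() below).
 */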
#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}
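
/*
 * dispatch_bios() is the callback handed to the region hash: writes that
 * were delayed on a RECOVERING region are pushed back through here once
 * that region's recovery completes, and simply rejoin the write queue.
 */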
static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the dm_raid1_error values (DM_RAID1_*_ERROR)
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}
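
/*
 * mirror_flush() is handed to the dirty log as its flush callback (see
 * create_dirty_log() below): it issues an empty barrier to every leg and
 * marks any leg that fails it with DM_RAID1_FLUSH_ERROR.
 */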
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_BARRIER,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.bvec = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* A read error means the default mirror has failed. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
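/*
 * choose_mirror() walks backwards from the default leg, wrapping at the
 * start of the array, and returns the first leg with no recorded errors
 * (NULL if every leg has failed).
 */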
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}
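
/*
 * hold_bio() parks a bio on ms->holds while the device is active.  Once
 * a suspend has begun (ms->suspend set), it completes the bio instead:
 * requeued to the core (DM_ENDIO_REQUEUE) for a 'noflush' suspend, or
 * failed with -EIO otherwise.  mirror_presuspend() drains the list by
 * re-running each held bio through this function.
 */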
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * If device is suspended, complete the bio.
	 */
	if (atomic_read(&ms->suspend)) {
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	spin_lock_irq(&ms->lock);
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio, ret);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event.  Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}
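
/*
 * do_writes() sorts queued writes by region state: in-sync regions
 * (CLEAN/DIRTY) are written to every leg, NOSYNC regions only to the
 * default leg, and writes to RECOVERING regions are delayed.  Once
 * ms->log_failure is set, writes destined for in-sync regions are
 * diverted to ms->failures and left to do_failures().
 */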
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if (unlikely(bio_empty_barrier(bio))) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/Os to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If we have been told to handle errors, hold the bio
		 * and wait for userspace to deal with the problem.
		 * Otherwise pretend that the I/O succeeded.  (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (!get_valid_mirror(ms))
			bio_endio(bio, -EIO);
		else if (errors_handled(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio, 0);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}
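
/*
 * Note that the lists are snapshotted once under the lock: failures
 * produced while this pass runs (e.g. by write_callback()) land on
 * ms->failures and are handled on the next wakeup of kmirrord.
 */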

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
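
/*
 * For illustration only (device names and sizes are hypothetical), a
 * two-leg mirror with a core log, 1024-sector regions and error handling
 * enabled could be created with:
 *
 *   dmsetup create mymirror --table \
 *     "0 2097152 mirror core 2 1024 nosync 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors"
 */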
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}
1117
1da177e4
LT
1118/*
1119 * Mirror mapping function
1120 */
1121static int mirror_map(struct dm_target *ti, struct bio *bio,
1122 union map_info *map_context)
1123{
1124 int r, rw = bio_rw(bio);
1125 struct mirror *m;
1126 struct mirror_set *ms = ti->private;
06386bbf 1127 struct dm_raid1_read_record *read_record = NULL;
1f965b19 1128 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
1da177e4
LT
1129
1130 if (rw == WRITE) {
06386bbf 1131 /* Save region for mirror_end_io() handler */
1f965b19 1132 map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
1da177e4 1133 queue_bio(ms, bio, rw);
d2a7ad29 1134 return DM_MAPIO_SUBMITTED;
1da177e4
LT
1135 }
1136
1f965b19 1137 r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
1da177e4
LT
1138 if (r < 0 && r != -EWOULDBLOCK)
1139 return r;
1140
1da177e4 1141 /*
06386bbf 1142 * If region is not in-sync queue the bio.
1da177e4 1143 */
06386bbf
JB
1144 if (!r || (r == -EWOULDBLOCK)) {
1145 if (rw == READA)
1146 return -EWOULDBLOCK;
1da177e4 1147
1da177e4 1148 queue_bio(ms, bio, rw);
d2a7ad29 1149 return DM_MAPIO_SUBMITTED;
1da177e4
LT
1150 }
1151
06386bbf
JB
1152 /*
1153 * The region is in-sync and we can perform reads directly.
1154 * Store enough information so we can retry if it fails.
1155 */
1da177e4 1156 m = choose_mirror(ms, bio->bi_sector);
06386bbf 1157 if (unlikely(!m))
1da177e4
LT
1158 return -EIO;
1159
06386bbf
JB
1160 read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
1161 if (likely(read_record)) {
1162 dm_bio_record(&read_record->details, bio);
1163 map_context->ptr = read_record;
1164 read_record->m = m;
1165 }
1166
1167 map_bio(m, bio);
1168
d2a7ad29 1169 return DM_MAPIO_REMAPPED;
1da177e4
LT
1170}
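
/*
 * On the read path, mirror_map() saves the chosen leg and the original
 * bio fields in a dm_raid1_read_record so that mirror_end_io() can
 * restore the bio and retry it on another leg if the read fails.
 */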
static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (likely(!bio_empty_barrier(bio)))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);

	/*
	 * Now that ms->suspend is set and the workqueue is flushed, no more
	 * entries can be added to the ms->holds list, so process it.
	 *
	 * Bios can still arrive concurrently with or after this
	 * presuspend function, but they cannot join the hold list
	 * because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 * A => Alive - No failures
 * F => Flush - A flush failure occurred
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 * U => Unclassified - An error was recorded but its type is unknown
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
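
/*
 * Illustrative STATUSTYPE_INFO output for a healthy two-leg mirror (the
 * trailing fields come from the dirty log's own status routine):
 *
 *   2 253:4 253:5 1024/1024 1 AA <log status ...>
 *
 * where "AA" is one device_status_char() per leg.
 */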

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");