dm raid1: handle resync failures
drivers/md/dm-raid1.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of four lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 *   failed_recovered_regions: Regions that kcopyd failed to
 *   recover.  rh_update_states() still schedules the delayed io,
 *   but marks the region out-of-sync if the handle_errors feature
 *   is set; otherwise it keeps the old behaviour and marks the
 *   region in sync despite the failure.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the four
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

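/*
 * Illustrative overview (added for clarity, not part of the upstream
 * file): a region is created lazily by __rh_alloc() in the CLEAN or
 * NOSYNC state, moves CLEAN -> DIRTY on the first write (rh_inc) and
 * DIRTY -> CLEAN when the last pending write drains (rh_dec).
 * Recovery moves a region to RH_RECOVERING via rh_recovery_prepare()
 * and then, depending on the kcopyd result, onto recovered_regions or
 * failed_recovered_regions via rh_recovery_end().
 */
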
/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
	atomic_t error_count;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the next two lists */
	struct bio_list reads;
	struct bio_list writes;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;

	struct mirror *default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

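/*
 * Example (added for clarity, not part of the upstream file): with a
 * region_size of 64 sectors, region_shift is 6, so a bio at sector
 * 200 of a target beginning at sector 0 maps to region 3, and
 * region_to_sector(rh, 3) gives back sector 192 within the mirror.
 */
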
static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

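/*
 * Worked example (added for clarity, not part of the upstream file):
 * the sizing loop in rh_init() gives roughly 64-128 regions per
 * bucket, rounded to a power of two no smaller than 64 buckets.  For
 * nr_regions = 100000, max_buckets = 1562; the loop stops at 2048,
 * the final shift halves it to 1024 buckets, and mask = 1023.
 */
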
static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

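/*
 * Note (added for clarity, not part of the upstream file): this is a
 * multiplicative hash; the constant is close to 2^32 divided by the
 * golden ratio, so consecutive region numbers scatter across the
 * table.  The >> 12 discards the low-order product bits, which mix
 * least well, before the result is masked down to the bucket count.
 */
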
static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry(reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}

static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

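/*
 * Note (added for clarity, not part of the upstream file):
 * __rh_alloc() is entered with hash_lock held for read.  It drops the
 * lock to allocate and to query the log (which may block), then
 * retakes it for write and repeats the lookup: another cpu may have
 * inserted the same region in the meantime, in which case the new
 * allocation is simply returned to the pool.
 */
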
static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice(&rh->failed_recovered_regions, &failed_recovered);
		INIT_LIST_HEAD(&rh->failed_recovered_regions);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}

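/*
 * Note (added for clarity, not part of the upstream file): the
 * failed_recovered loop above is the heart of this patch.  When the
 * handle_errors feature is enabled, a region whose resync copy failed
 * is reported to the dirty log as still out-of-sync
 * (set_region_sync(..., 0)); without the feature, the old behaviour
 * is kept and the region is marked in sync despite the failure.
 */
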
static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list
		 * for the next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept off
		 * the clean list.
		 * The hash entry for RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

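/*
 * Note (added for clarity, not part of the upstream file): the extra
 * recovery_in_flight reference taken above keeps the count non-zero
 * for the whole loop, so the waiter in mirror_postsuspend() sleeping
 * on _kmirrord_recovery_stopped cannot observe a transient zero
 * between two __rh_recovery_prepare() calls and return early.
 */
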
/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static void rh_flush(struct region_hash *rh)
{
	rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	if (read_err)
		/* A read error means the default mirror has failed. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");

	if (write_err)
		DMERR_LIMIT("Write error during recovery (error = 0x%x)",
			    write_err);

	rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = ms->default_mirror;
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == ms->default_mirror)
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}

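/*
 * Worked example (added for clarity, not part of the upstream file):
 * region_size is always a power of two, so the final region's length
 * is ti->len modulo region_size, computed with a mask.  With
 * ti->len = 1000 sectors and region_size = 64, the last region holds
 * 1000 & 63 = 40 sectors; were ti->len an exact multiple, the mask
 * would yield 0 and the code falls back to a full region.
 */
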
static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = ms->default_mirror;

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
	unsigned int i;
	int uptodate = 1;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the targets endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */

	if (error) {
		/*
		 * only error the io if all mirrors failed.
		 * FIXME: bogus
		 */
		uptodate = 0;
		for (i = 0; i < ms->nr_mirrors; i++)
			if (!test_bit(i, &error)) {
				uptodate = 1;
				break;
			}
	}
	bio_endio(bio, bio->bi_size, 0);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS + 1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

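/*
 * Note (added for clarity, not part of the upstream file): because
 * io_req carries a notify callback, dm_io() here is asynchronous and
 * its return value can be ignored; the completion status for all
 * mirrors arrives as the 'error' bitmask passed to write_callback(),
 * one bit per destination.
 */
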
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	rh_flush(&ms->rh);

	/*
	 * Dispatch io.
	 */
	while ((bio = bio_list_pop(&sync)))
		do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, ms->default_mirror, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes;

	spin_lock(&ms->lock);
	reads = ms->reads;
	writes = ms->writes;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	spin_unlock(&ms->lock);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kmalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	memset(ms, 0, len);
	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || (size & (size - 1)) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

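/*
 * Note (added for clarity, not part of the upstream file): the only
 * feature understood so far is "handle_errors", so the feature block
 * in a table line is literally "1 handle_errors".  parse_features()
 * consumes the count plus one feature word; a count larger than the
 * remaining argument list is rejected.
 */
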
/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
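/*
 * Illustrative example (added for clarity, not part of the upstream
 * file; device names and sizes are made up): a two-way mirror over
 * the first 2097152 sectors with a core log, 64-sector regions and
 * error handling enabled could be created with something like:
 *
 *   echo "0 2097152 mirror core 2 64 nosync 2 /dev/sda1 0 /dev/sdb1 0 \
 *         1 handle_errors" | dmsetup create mymirror
 */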
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		free_context(ms, ti, m);
		return -ENOMEM;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);

	r = parse_features(ms, argc, argv, &args_used);
	if (r) {
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		free_context(ms, ti, ms->nr_mirrors);
		return -EINVAL;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r) {
		destroy_workqueue(ms->kmirrord_wq);
		free_context(ms, ti, ms->nr_mirrors);
		return r;
	}

	wake(ms);
	return 0;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock(&ms->lock);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock(&ms->lock);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->suspend && log->type->suspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu 0 ",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

		sz += ms->rh.log->type->status(ms->rh.log, type,
					       result + sz, maxlen - sz);

		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}

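/*
 * Illustrative example (added for clarity, not part of the upstream
 * file; names and counts are made up): for a fully synced two-way
 * mirror of 1024 regions, "dmsetup status" (STATUSTYPE_INFO) would
 * print something like "2 254:11 254:12 1024/1024 0" followed by the
 * dirty log's own status fields.
 */
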
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("unregister failed %d", r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");