Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * dm-snapshot.c | |
3 | * | |
4 | * Copyright (C) 2001-2002 Sistina Software (UK) Limited. | |
5 | * | |
6 | * This file is released under the GPL. | |
7 | */ | |
8 | ||
9 | #include <linux/blkdev.h> | |
1da177e4 | 10 | #include <linux/device-mapper.h> |
90fa1527 | 11 | #include <linux/delay.h> |
1da177e4 LT |
12 | #include <linux/fs.h> |
13 | #include <linux/init.h> | |
14 | #include <linux/kdev_t.h> | |
15 | #include <linux/list.h> | |
16 | #include <linux/mempool.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/vmalloc.h> | |
6f3c3f0a | 20 | #include <linux/log2.h> |
a765e20e | 21 | #include <linux/dm-kcopyd.h> |
ccc45ea8 | 22 | #include <linux/workqueue.h> |
1da177e4 | 23 | |
aea53d92 | 24 | #include "dm-exception-store.h" |
1da177e4 | 25 | |
72d94861 AK |
26 | #define DM_MSG_PREFIX "snapshots" |
27 | ||
1da177e4 LT |
28 | /* |
29 | * The percentage increment we will wake up users at | |
30 | */ | |
31 | #define WAKE_UP_PERCENT 5 | |
32 | ||
33 | /* | |
34 | * kcopyd priority of snapshot operations | |
35 | */ | |
36 | #define SNAPSHOT_COPY_PRIORITY 2 | |
37 | ||
38 | /* | |
8ee2767a | 39 | * Reserve 1MB for each snapshot initially (with minimum of 1 page). |
1da177e4 | 40 | */ |
8ee2767a | 41 | #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1) |
1da177e4 | 42 | |
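A minimal standalone sketch of what this evaluates to, assuming 4 KiB pages (PAGE_SHIFT == 12, an assumption); the `? :` form is the GNU C conditional with omitted middle operand, so the expression falls back to 1 page if the shift ever yields 0:

```c
/* Illustration only, not kernel code; PAGE_SHIFT = 12 is an assumption. */
#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4 KiB pages */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

int main(void)
{
	/* 1 MiB / 4 KiB = 256 pages; the GNU "?:" falls back to 1
	 * if the shifted value were ever 0. */
	printf("%lu\n", SNAPSHOT_PAGES);	/* prints 256 */
	return 0;
}
```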
cd45daff MP |
43 | /* |
44 | * The size of the mempool used to track chunks in use. | |
45 | */ | |
46 | #define MIN_IOS 256 | |
47 | ||
ccc45ea8 JB |
48 | #define DM_TRACKED_CHUNK_HASH_SIZE 16 |
49 | #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ | |
50 | (DM_TRACKED_CHUNK_HASH_SIZE - 1)) | |
51 | ||
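As a quick illustration of the bucket selection (the chunk values are arbitrary): only the low four bits of the chunk number are used, so chunks 16 apart share a bucket.

```c
/* Standalone sketch of DM_TRACKED_CHUNK_HASH(); illustration only. */
#include <stdio.h>

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))

int main(void)
{
	unsigned long long chunks[] = { 0, 5, 16, 21, 0x1234 };

	/* 0 -> 0, 5 -> 5, 16 -> 0, 21 -> 5, 0x1234 -> 4 */
	for (int i = 0; i < 5; i++)
		printf("chunk %llu -> bucket %lu\n", chunks[i],
		       DM_TRACKED_CHUNK_HASH(chunks[i]));
	return 0;
}
```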
191437a5 | 52 | struct dm_exception_table { |
ccc45ea8 JB |
53 | uint32_t hash_mask; |
54 | unsigned hash_shift; | |
55 | struct list_head *table; | |
56 | }; | |
57 | ||
58 | struct dm_snapshot { | |
59 | struct rw_semaphore lock; | |
60 | ||
61 | struct dm_dev *origin; | |
fc56f6fb MS |
62 | struct dm_dev *cow; |
63 | ||
64 | struct dm_target *ti; | |
ccc45ea8 JB |
65 | |
66 | /* List of snapshots per Origin */ | |
67 | struct list_head list; | |
68 | ||
69 | /* You can't use a snapshot if this is 0 (e.g. if full) */ | |
70 | int valid; | |
71 | ||
72 | /* Origin writes don't trigger exceptions until this is set */ | |
73 | int active; | |
74 | ||
c26655ca MS |
75 | /* Whether or not owning mapped_device is suspended */ |
76 | int suspended; | |
77 | ||
ccc45ea8 JB |
78 | mempool_t *pending_pool; |
79 | ||
80 | atomic_t pending_exceptions_count; | |
81 | ||
191437a5 JB |
82 | struct dm_exception_table pending; |
83 | struct dm_exception_table complete; | |
ccc45ea8 JB |
84 | |
85 | /* | |
86 | * pe_lock protects all pending_exception operations and access | |
87 | * as well as the snapshot_bios list. | |
88 | */ | |
89 | spinlock_t pe_lock; | |
90 | ||
91 | /* The on disk metadata handler */ | |
92 | struct dm_exception_store *store; | |
93 | ||
94 | struct dm_kcopyd_client *kcopyd_client; | |
95 | ||
96 | /* Queue of snapshot writes for ksnapd to flush */ | |
97 | struct bio_list queued_bios; | |
98 | struct work_struct queued_bios_work; | |
99 | ||
100 | /* Chunks with outstanding reads */ | |
101 | mempool_t *tracked_chunk_pool; | |
102 | spinlock_t tracked_chunk_lock; | |
103 | struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; | |
104 | }; | |
105 | ||
fc56f6fb MS |
106 | struct dm_dev *dm_snap_cow(struct dm_snapshot *s) |
107 | { | |
108 | return s->cow; | |
109 | } | |
110 | EXPORT_SYMBOL(dm_snap_cow); | |
111 | ||
c642f9e0 | 112 | static struct workqueue_struct *ksnapd; |
c4028958 | 113 | static void flush_queued_bios(struct work_struct *work); |
ca3a931f | 114 | |
ccc45ea8 JB |
115 | static sector_t chunk_to_sector(struct dm_exception_store *store, |
116 | chunk_t chunk) | |
117 | { | |
118 | return chunk << store->chunk_shift; | |
119 | } | |
120 | ||
121 | static int bdev_equal(struct block_device *lhs, struct block_device *rhs) | |
122 | { | |
123 | /* | |
124 | * There is only ever one instance of a particular block | |
125 | * device so we can compare pointers safely. | |
126 | */ | |
127 | return lhs == rhs; | |
128 | } | |
129 | ||
028867ac | 130 | struct dm_snap_pending_exception { |
1d4989c8 | 131 | struct dm_exception e; |
1da177e4 LT |
132 | |
133 | /* | |
134 | * Origin buffers waiting for this to complete are held | |
135 | * in a bio list | |
136 | */ | |
137 | struct bio_list origin_bios; | |
138 | struct bio_list snapshot_bios; | |
139 | ||
eccf0817 AK |
140 | /* |
141 | * Short-term queue of pending exceptions prior to submission. | |
142 | */ | |
143 | struct list_head list; | |
144 | ||
1da177e4 | 145 | /* |
b4b610f6 | 146 | * The primary pending_exception is the one that holds |
4b832e8d | 147 | * the ref_count and the list of origin_bios for a |
b4b610f6 AK |
148 | * group of pending_exceptions. It is always last to get freed. |
149 | * These fields get set up when writing to the origin. | |
1da177e4 | 150 | */ |
028867ac | 151 | struct dm_snap_pending_exception *primary_pe; |
b4b610f6 AK |
152 | |
153 | /* | |
154 | * Number of pending_exceptions processing this chunk. | |
155 | * When this drops to zero we must complete the origin bios. | |
156 | * If incrementing or decrementing this, hold pe->snap->lock for | |
157 | * the sibling concerned and not pe->primary_pe->snap->lock unless | |
158 | * they are the same. | |
159 | */ | |
4b832e8d | 160 | atomic_t ref_count; |
1da177e4 LT |
161 | |
162 | /* Pointer back to snapshot context */ | |
163 | struct dm_snapshot *snap; | |
164 | ||
165 | /* | |
166 | * 1 indicates the exception has already been sent to | |
167 | * kcopyd. | |
168 | */ | |
169 | int started; | |
170 | }; | |
171 | ||
172 | /* | |
173 | * Hash table mapping origin volumes to lists of snapshots and | |
174 | * a lock to protect it | |
175 | */ | |
e18b890b CL |
176 | static struct kmem_cache *exception_cache; |
177 | static struct kmem_cache *pending_cache; | |
1da177e4 | 178 | |
cd45daff MP |
179 | struct dm_snap_tracked_chunk { |
180 | struct hlist_node node; | |
181 | chunk_t chunk; | |
182 | }; | |
183 | ||
184 | static struct kmem_cache *tracked_chunk_cache; | |
185 | ||
186 | static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s, | |
187 | chunk_t chunk) | |
188 | { | |
189 | struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool, | |
190 | GFP_NOIO); | |
191 | unsigned long flags; | |
192 | ||
193 | c->chunk = chunk; | |
194 | ||
195 | spin_lock_irqsave(&s->tracked_chunk_lock, flags); | |
196 | hlist_add_head(&c->node, | |
197 | &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); | |
198 | spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); | |
199 | ||
200 | return c; | |
201 | } | |
202 | ||
203 | static void stop_tracking_chunk(struct dm_snapshot *s, | |
204 | struct dm_snap_tracked_chunk *c) | |
205 | { | |
206 | unsigned long flags; | |
207 | ||
208 | spin_lock_irqsave(&s->tracked_chunk_lock, flags); | |
209 | hlist_del(&c->node); | |
210 | spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); | |
211 | ||
212 | mempool_free(c, s->tracked_chunk_pool); | |
213 | } | |
214 | ||
a8d41b59 MP |
215 | static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) |
216 | { | |
217 | struct dm_snap_tracked_chunk *c; | |
218 | struct hlist_node *hn; | |
219 | int found = 0; | |
220 | ||
221 | spin_lock_irq(&s->tracked_chunk_lock); | |
222 | ||
223 | hlist_for_each_entry(c, hn, | |
224 | &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { | |
225 | if (c->chunk == chunk) { | |
226 | found = 1; | |
227 | break; | |
228 | } | |
229 | } | |
230 | ||
231 | spin_unlock_irq(&s->tracked_chunk_lock); | |
232 | ||
233 | return found; | |
234 | } | |
235 | ||
615d1eb9 MS |
236 | /* |
237 | * This conflicting I/O is extremely improbable in the caller, | |
238 | * so msleep(1) is sufficient and there is no need for a wait queue. | |
239 | */ | |
240 | static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) | |
241 | { | |
242 | while (__chunk_is_tracked(s, chunk)) | |
243 | msleep(1); | |
244 | } | |
245 | ||
1da177e4 LT |
246 | /* |
247 | * One of these per registered origin, held in the snapshot_origins hash | |
248 | */ | |
249 | struct origin { | |
250 | /* The origin device */ | |
251 | struct block_device *bdev; | |
252 | ||
253 | struct list_head hash_list; | |
254 | ||
255 | /* List of snapshots for this origin */ | |
256 | struct list_head snapshots; | |
257 | }; | |
258 | ||
259 | /* | |
260 | * Size of the hash table for origin volumes. If we make this | |
261 | * the size of the minors list then it should be nearly perfect | |
262 | */ | |
263 | #define ORIGIN_HASH_SIZE 256 | |
264 | #define ORIGIN_MASK 0xFF | |
265 | static struct list_head *_origins; | |
266 | static struct rw_semaphore _origins_lock; | |
267 | ||
268 | static int init_origin_hash(void) | |
269 | { | |
270 | int i; | |
271 | ||
272 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | |
273 | GFP_KERNEL); | |
274 | if (!_origins) { | |
72d94861 | 275 | DMERR("unable to allocate memory"); |
1da177e4 LT |
276 | return -ENOMEM; |
277 | } | |
278 | ||
279 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | |
280 | INIT_LIST_HEAD(_origins + i); | |
281 | init_rwsem(&_origins_lock); | |
282 | ||
283 | return 0; | |
284 | } | |
285 | ||
286 | static void exit_origin_hash(void) | |
287 | { | |
288 | kfree(_origins); | |
289 | } | |
290 | ||
028867ac | 291 | static unsigned origin_hash(struct block_device *bdev) |
1da177e4 LT |
292 | { |
293 | return bdev->bd_dev & ORIGIN_MASK; | |
294 | } | |
295 | ||
296 | static struct origin *__lookup_origin(struct block_device *origin) | |
297 | { | |
298 | struct list_head *ol; | |
299 | struct origin *o; | |
300 | ||
301 | ol = &_origins[origin_hash(origin)]; | |
302 | list_for_each_entry (o, ol, hash_list) | |
303 | if (bdev_equal(o->bdev, origin)) | |
304 | return o; | |
305 | ||
306 | return NULL; | |
307 | } | |
308 | ||
309 | static void __insert_origin(struct origin *o) | |
310 | { | |
311 | struct list_head *sl = &_origins[origin_hash(o->bdev)]; | |
312 | list_add_tail(&o->hash_list, sl); | |
313 | } | |
314 | ||
c1f0c183 MS |
315 | /* |
316 | * _origins_lock must be held when calling this function. | |
317 | * Returns number of snapshots registered using the supplied cow device, plus: | |
318 | * snap_src - a snapshot suitable for use as a source of exception handover | |
319 | * snap_dest - a snapshot capable of receiving exception handover. | |
320 | * | |
321 | * Possible return values and states: | |
322 | * 0: NULL, NULL - first new snapshot | |
323 | * 1: snap_src, NULL - normal snapshot | |
324 | * 2: snap_src, snap_dest - waiting for handover | |
325 | * 2: snap_src, NULL - handed over, waiting for old to be deleted | |
326 | * 1: NULL, snap_dest - source got destroyed without handover | |
327 | */ | |
328 | static int __find_snapshots_sharing_cow(struct dm_snapshot *snap, | |
329 | struct dm_snapshot **snap_src, | |
330 | struct dm_snapshot **snap_dest) | |
331 | { | |
332 | struct dm_snapshot *s; | |
333 | struct origin *o; | |
334 | int count = 0; | |
335 | int active; | |
336 | ||
337 | o = __lookup_origin(snap->origin->bdev); | |
338 | if (!o) | |
339 | goto out; | |
340 | ||
341 | list_for_each_entry(s, &o->snapshots, list) { | |
342 | if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) | |
343 | continue; | |
344 | ||
345 | down_read(&s->lock); | |
346 | active = s->active; | |
347 | up_read(&s->lock); | |
348 | ||
349 | if (active) { | |
350 | if (snap_src) | |
351 | *snap_src = s; | |
352 | } else if (snap_dest) | |
353 | *snap_dest = s; | |
354 | ||
355 | count++; | |
356 | } | |
357 | ||
358 | out: | |
359 | return count; | |
360 | } | |
361 | ||
362 | /* | |
363 | * On success, returns 1 if this snapshot is a handover destination, | |
364 | * otherwise returns 0. | |
365 | */ | |
366 | static int __validate_exception_handover(struct dm_snapshot *snap) | |
367 | { | |
368 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | |
369 | ||
370 | /* Does snapshot need exceptions handed over to it? */ | |
371 | if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest) == 2) || | |
372 | snap_dest) { | |
373 | snap->ti->error = "Snapshot cow pairing for exception " | |
374 | "table handover failed"; | |
375 | return -EINVAL; | |
376 | } | |
377 | ||
378 | /* | |
379 | * If no snap_src was found, snap cannot become a handover | |
380 | * destination. | |
381 | */ | |
382 | if (!snap_src) | |
383 | return 0; | |
384 | ||
385 | return 1; | |
386 | } | |
387 | ||
388 | static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) | |
389 | { | |
390 | struct dm_snapshot *l; | |
391 | ||
392 | /* Sort the list according to chunk size, largest-first smallest-last */ | |
393 | list_for_each_entry(l, &o->snapshots, list) | |
394 | if (l->store->chunk_size < s->store->chunk_size) | |
395 | break; | |
396 | list_add_tail(&s->list, &l->list); | |
397 | } | |
398 | ||
1da177e4 LT |
399 | /* |
400 | * Make a note of the snapshot and its origin so we can look it | |
401 | * up when the origin has a write on it. | |
c1f0c183 MS |
402 | * |
403 | * Also validate snapshot exception store handovers. | |
404 | * On success, returns 1 if this registration is a handover destination, | |
405 | * otherwise returns 0. | |
1da177e4 LT |
406 | */ |
407 | static int register_snapshot(struct dm_snapshot *snap) | |
408 | { | |
c1f0c183 | 409 | struct origin *o, *new_o = NULL; |
1da177e4 | 410 | struct block_device *bdev = snap->origin->bdev; |
c1f0c183 | 411 | int r = 0; |
1da177e4 | 412 | |
60c856c8 MP |
413 | new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); |
414 | if (!new_o) | |
415 | return -ENOMEM; | |
416 | ||
1da177e4 | 417 | down_write(&_origins_lock); |
1da177e4 | 418 | |
c1f0c183 MS |
419 | r = __validate_exception_handover(snap); |
420 | if (r < 0) { | |
421 | kfree(new_o); | |
422 | goto out; | |
423 | } | |
424 | ||
425 | o = __lookup_origin(bdev); | |
60c856c8 MP |
426 | if (o) |
427 | kfree(new_o); | |
428 | else { | |
1da177e4 | 429 | /* New origin */ |
60c856c8 | 430 | o = new_o; |
1da177e4 LT |
431 | |
432 | /* Initialise the struct */ | |
433 | INIT_LIST_HEAD(&o->snapshots); | |
434 | o->bdev = bdev; | |
435 | ||
436 | __insert_origin(o); | |
437 | } | |
438 | ||
c1f0c183 MS |
439 | __insert_snapshot(o, snap); |
440 | ||
441 | out: | |
442 | up_write(&_origins_lock); | |
443 | ||
444 | return r; | |
445 | } | |
446 | ||
447 | /* | |
448 | * Move snapshot to correct place in list according to chunk size. | |
449 | */ | |
450 | static void reregister_snapshot(struct dm_snapshot *s) | |
451 | { | |
452 | struct block_device *bdev = s->origin->bdev; | |
453 | ||
454 | down_write(&_origins_lock); | |
455 | ||
456 | list_del(&s->list); | |
457 | __insert_snapshot(__lookup_origin(bdev), s); | |
1da177e4 LT |
458 | |
459 | up_write(&_origins_lock); | |
1da177e4 LT |
460 | } |
461 | ||
462 | static void unregister_snapshot(struct dm_snapshot *s) | |
463 | { | |
464 | struct origin *o; | |
465 | ||
466 | down_write(&_origins_lock); | |
467 | o = __lookup_origin(s->origin->bdev); | |
468 | ||
469 | list_del(&s->list); | |
c1f0c183 | 470 | if (o && list_empty(&o->snapshots)) { |
1da177e4 LT |
471 | list_del(&o->hash_list); |
472 | kfree(o); | |
473 | } | |
474 | ||
475 | up_write(&_origins_lock); | |
476 | } | |
477 | ||
478 | /* | |
479 | * Implementation of the exception hash tables. | |
d74f81f8 MB |
480 | * The lowest hash_shift bits of the chunk number are ignored, allowing |
481 | * some consecutive chunks to be grouped together. | |
1da177e4 | 482 | */ |
3510cb94 JB |
483 | static int dm_exception_table_init(struct dm_exception_table *et, |
484 | uint32_t size, unsigned hash_shift) | |
1da177e4 LT |
485 | { |
486 | unsigned int i; | |
487 | ||
d74f81f8 | 488 | et->hash_shift = hash_shift; |
1da177e4 LT |
489 | et->hash_mask = size - 1; |
490 | et->table = dm_vcalloc(size, sizeof(struct list_head)); | |
491 | if (!et->table) | |
492 | return -ENOMEM; | |
493 | ||
494 | for (i = 0; i < size; i++) | |
495 | INIT_LIST_HEAD(et->table + i); | |
496 | ||
497 | return 0; | |
498 | } | |
499 | ||
3510cb94 JB |
500 | static void dm_exception_table_exit(struct dm_exception_table *et, |
501 | struct kmem_cache *mem) | |
1da177e4 LT |
502 | { |
503 | struct list_head *slot; | |
1d4989c8 | 504 | struct dm_exception *ex, *next; |
1da177e4 LT |
505 | int i, size; |
506 | ||
507 | size = et->hash_mask + 1; | |
508 | for (i = 0; i < size; i++) { | |
509 | slot = et->table + i; | |
510 | ||
511 | list_for_each_entry_safe (ex, next, slot, hash_list) | |
512 | kmem_cache_free(mem, ex); | |
513 | } | |
514 | ||
515 | vfree(et->table); | |
516 | } | |
517 | ||
191437a5 | 518 | static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) |
1da177e4 | 519 | { |
d74f81f8 | 520 | return (chunk >> et->hash_shift) & et->hash_mask; |
1da177e4 LT |
521 | } |
522 | ||
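A minimal standalone sketch of this bucketing: because the low hash_shift bits are discarded, a run of 2^hash_shift consecutive chunks lands on the same hash_list (the completed table is created with hash_shift = DM_CHUNK_CONSECUTIVE_BITS in init_hash_tables() below; the shift used here is a made-up value chosen for readability).

```c
/* Illustration only; shift = 3 is an assumption, the real value comes
 * from DM_CHUNK_CONSECUTIVE_BITS in dm-exception-store.h. */
#include <stdio.h>
#include <stdint.h>

static uint32_t example_hash(uint64_t chunk, unsigned hash_shift, uint32_t hash_mask)
{
	return (uint32_t)((chunk >> hash_shift) & hash_mask);
}

int main(void)
{
	uint32_t size = 64, mask = size - 1;	/* mask = size - 1, as in dm_exception_table_init() */
	unsigned shift = 3;

	/* chunks 0..7 -> bucket 0, 8..15 -> bucket 1, 16..17 -> bucket 2 */
	for (uint64_t chunk = 0; chunk < 18; chunk++)
		printf("chunk %llu -> bucket %u\n",
		       (unsigned long long)chunk,
		       (unsigned)example_hash(chunk, shift, mask));
	return 0;
}
```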
3510cb94 | 523 | static void dm_remove_exception(struct dm_exception *e) |
1da177e4 LT |
524 | { |
525 | list_del(&e->hash_list); | |
526 | } | |
527 | ||
528 | /* | |
529 | * Return the exception data for a sector, or NULL if not | |
530 | * remapped. | |
531 | */ | |
3510cb94 JB |
532 | static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et, |
533 | chunk_t chunk) | |
1da177e4 LT |
534 | { |
535 | struct list_head *slot; | |
1d4989c8 | 536 | struct dm_exception *e; |
1da177e4 LT |
537 | |
538 | slot = &et->table[exception_hash(et, chunk)]; | |
539 | list_for_each_entry (e, slot, hash_list) | |
d74f81f8 MB |
540 | if (chunk >= e->old_chunk && |
541 | chunk <= e->old_chunk + dm_consecutive_chunk_count(e)) | |
1da177e4 LT |
542 | return e; |
543 | ||
544 | return NULL; | |
545 | } | |
546 | ||
3510cb94 | 547 | static struct dm_exception *alloc_completed_exception(void) |
1da177e4 | 548 | { |
1d4989c8 | 549 | struct dm_exception *e; |
1da177e4 LT |
550 | |
551 | e = kmem_cache_alloc(exception_cache, GFP_NOIO); | |
552 | if (!e) | |
553 | e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); | |
554 | ||
555 | return e; | |
556 | } | |
557 | ||
3510cb94 | 558 | static void free_completed_exception(struct dm_exception *e) |
1da177e4 LT |
559 | { |
560 | kmem_cache_free(exception_cache, e); | |
561 | } | |
562 | ||
92e86812 | 563 | static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s) |
1da177e4 | 564 | { |
92e86812 MP |
565 | struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool, |
566 | GFP_NOIO); | |
567 | ||
879129d2 | 568 | atomic_inc(&s->pending_exceptions_count); |
92e86812 MP |
569 | pe->snap = s; |
570 | ||
571 | return pe; | |
1da177e4 LT |
572 | } |
573 | ||
028867ac | 574 | static void free_pending_exception(struct dm_snap_pending_exception *pe) |
1da177e4 | 575 | { |
879129d2 MP |
576 | struct dm_snapshot *s = pe->snap; |
577 | ||
578 | mempool_free(pe, s->pending_pool); | |
579 | smp_mb__before_atomic_dec(); | |
580 | atomic_dec(&s->pending_exceptions_count); | |
1da177e4 LT |
581 | } |
582 | ||
3510cb94 JB |
583 | static void dm_insert_exception(struct dm_exception_table *eh, |
584 | struct dm_exception *new_e) | |
d74f81f8 | 585 | { |
d74f81f8 | 586 | struct list_head *l; |
1d4989c8 | 587 | struct dm_exception *e = NULL; |
d74f81f8 MB |
588 | |
589 | l = &eh->table[exception_hash(eh, new_e->old_chunk)]; | |
590 | ||
591 | /* Add immediately if this table doesn't support consecutive chunks */ | |
592 | if (!eh->hash_shift) | |
593 | goto out; | |
594 | ||
595 | /* List is ordered by old_chunk */ | |
596 | list_for_each_entry_reverse(e, l, hash_list) { | |
597 | /* Insert after an existing chunk? */ | |
598 | if (new_e->old_chunk == (e->old_chunk + | |
599 | dm_consecutive_chunk_count(e) + 1) && | |
600 | new_e->new_chunk == (dm_chunk_number(e->new_chunk) + | |
601 | dm_consecutive_chunk_count(e) + 1)) { | |
602 | dm_consecutive_chunk_count_inc(e); | |
3510cb94 | 603 | free_completed_exception(new_e); |
d74f81f8 MB |
604 | return; |
605 | } | |
606 | ||
607 | /* Insert before an existing chunk? */ | |
608 | if (new_e->old_chunk == (e->old_chunk - 1) && | |
609 | new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) { | |
610 | dm_consecutive_chunk_count_inc(e); | |
611 | e->old_chunk--; | |
612 | e->new_chunk--; | |
3510cb94 | 613 | free_completed_exception(new_e); |
d74f81f8 MB |
614 | return; |
615 | } | |
616 | ||
617 | if (new_e->old_chunk > e->old_chunk) | |
618 | break; | |
619 | } | |
620 | ||
621 | out: | |
622 | list_add(&new_e->hash_list, e ? &e->hash_list : l); | |
623 | } | |
624 | ||
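A standalone sketch of the merge rule above: when the new exception is contiguous with an existing one on both the origin and the COW side, the existing entry simply grows. The real code tracks the run length with dm_consecutive_chunk_count()/dm_chunk_number() from dm-exception-store.h; this illustration uses a plain counter field instead, and all names and numbers here are made up.

```c
#include <stdio.h>
#include <stdint.h>

struct simple_ex {
	uint64_t old_chunk;	/* first origin chunk covered */
	uint64_t new_chunk;	/* first COW chunk it maps to */
	unsigned consecutive;	/* extra chunks covered beyond the first */
};

/* Mirrors the "insert after an existing chunk?" test in dm_insert_exception(). */
static int merges_after(const struct simple_ex *e, uint64_t old, uint64_t new)
{
	return old == e->old_chunk + e->consecutive + 1 &&
	       new == e->new_chunk + e->consecutive + 1;
}

int main(void)
{
	struct simple_ex e = { .old_chunk = 10, .new_chunk = 3, .consecutive = 0 };

	/* Origin chunk 11 was copied to COW chunk 4: contiguous on both
	 * sides, so the run grows instead of storing a second exception. */
	if (merges_after(&e, 11, 4))
		e.consecutive++;

	printf("covers origin %llu..%llu -> cow %llu..%llu\n",
	       (unsigned long long)e.old_chunk,
	       (unsigned long long)(e.old_chunk + e.consecutive),
	       (unsigned long long)e.new_chunk,
	       (unsigned long long)(e.new_chunk + e.consecutive));
	return 0;
}
```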
a159c1ac JB |
625 | /* |
626 | * Callback used by the exception stores to load exceptions when | |
627 | * initialising. | |
628 | */ | |
629 | static int dm_add_exception(void *context, chunk_t old, chunk_t new) | |
1da177e4 | 630 | { |
a159c1ac | 631 | struct dm_snapshot *s = context; |
1d4989c8 | 632 | struct dm_exception *e; |
1da177e4 | 633 | |
3510cb94 | 634 | e = alloc_completed_exception(); |
1da177e4 LT |
635 | if (!e) |
636 | return -ENOMEM; | |
637 | ||
638 | e->old_chunk = old; | |
d74f81f8 MB |
639 | |
640 | /* Consecutive_count is implicitly initialised to zero */ | |
1da177e4 | 641 | e->new_chunk = new; |
d74f81f8 | 642 | |
3510cb94 | 643 | dm_insert_exception(&s->complete, e); |
d74f81f8 | 644 | |
1da177e4 LT |
645 | return 0; |
646 | } | |
647 | ||
7e201b35 MP |
648 | #define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r))) |
649 | ||
650 | /* | |
651 | * Return a minimum chunk size of all snapshots that have the specified origin. | |
652 | * Return zero if the origin has no snapshots. | |
653 | */ | |
654 | static sector_t __minimum_chunk_size(struct origin *o) | |
655 | { | |
656 | struct dm_snapshot *snap; | |
657 | unsigned chunk_size = 0; | |
658 | ||
659 | if (o) | |
660 | list_for_each_entry(snap, &o->snapshots, list) | |
661 | chunk_size = min_not_zero(chunk_size, | |
662 | snap->store->chunk_size); | |
663 | ||
664 | return chunk_size; | |
665 | } | |
666 | ||
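A tiny standalone sketch of how the macro behaves while scanning the snapshot list: zero means "no chunk size seen yet" and never wins the minimum (the sizes are arbitrary examples).

```c
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))	/* local stand-in for the kernel helper */
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

int main(void)
{
	unsigned chunk_size = 0;
	unsigned sizes[] = { 16, 8, 32 };	/* example per-snapshot chunk sizes */

	for (int i = 0; i < 3; i++)
		chunk_size = min_not_zero(chunk_size, sizes[i]);

	printf("%u\n", chunk_size);		/* prints 8 */
	return 0;
}
```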
1da177e4 LT |
667 | /* |
668 | * Hard coded magic. | |
669 | */ | |
670 | static int calc_max_buckets(void) | |
671 | { | |
672 | /* use a fixed size of 2MB */ | |
673 | unsigned long mem = 2 * 1024 * 1024; | |
674 | mem /= sizeof(struct list_head); | |
675 | ||
676 | return mem; | |
677 | } | |
678 | ||
1da177e4 LT |
679 | /* |
680 | * Allocate room for a suitable hash table. | |
681 | */ | |
fee1998e | 682 | static int init_hash_tables(struct dm_snapshot *s) |
1da177e4 LT |
683 | { |
684 | sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; | |
685 | ||
686 | /* | |
687 | * Calculate based on the size of the original volume or | |
688 | * the COW volume... | |
689 | */ | |
fc56f6fb | 690 | cow_dev_size = get_dev_size(s->cow->bdev); |
1da177e4 LT |
691 | origin_dev_size = get_dev_size(s->origin->bdev); |
692 | max_buckets = calc_max_buckets(); | |
693 | ||
fee1998e | 694 | hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; |
1da177e4 LT |
695 | hash_size = min(hash_size, max_buckets); |
696 | ||
8e87b9b8 MP |
697 | if (hash_size < 64) |
698 | hash_size = 64; | |
8defd830 | 699 | hash_size = rounddown_pow_of_two(hash_size); |
3510cb94 JB |
700 | if (dm_exception_table_init(&s->complete, hash_size, |
701 | DM_CHUNK_CONSECUTIVE_BITS)) | |
1da177e4 LT |
702 | return -ENOMEM; |
703 | ||
704 | /* | |
705 | * Allocate hash table for in-flight exceptions | |
706 | * Make this smaller than the real hash table | |
707 | */ | |
708 | hash_size >>= 3; | |
709 | if (hash_size < 64) | |
710 | hash_size = 64; | |
711 | ||
3510cb94 JB |
712 | if (dm_exception_table_init(&s->pending, hash_size, 0)) { |
713 | dm_exception_table_exit(&s->complete, exception_cache); | |
1da177e4 LT |
714 | return -ENOMEM; |
715 | } | |
716 | ||
717 | return 0; | |
718 | } | |
719 | ||
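A worked run of the sizing above, as a standalone sketch. The device sizes, the 16-sector chunk size and the 16-byte struct list_head are assumptions made only so the arithmetic is concrete; rounddown_pow_of_two() is reimplemented locally instead of using <linux/log2.h>.

```c
#include <stdio.h>

static unsigned long rounddown_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r * 2 <= n)
		r *= 2;
	return r;
}

int main(void)
{
	unsigned long origin_dev_size = 20971520; /* 10 GiB in 512-byte sectors */
	unsigned long cow_dev_size    = 2097152;  /*  1 GiB COW device */
	unsigned chunk_shift          = 4;        /* 16-sector (8 KiB) chunks */
	unsigned long max_buckets     = 2 * 1024 * 1024 / 16; /* calc_max_buckets(): 131072 */
	unsigned long hash_size;

	hash_size = (cow_dev_size < origin_dev_size ?
		     cow_dev_size : origin_dev_size) >> chunk_shift; /* 131072 chunks */
	if (hash_size > max_buckets)
		hash_size = max_buckets;
	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	printf("completed table buckets: %lu\n", hash_size);	/* 131072 */

	hash_size >>= 3;			/* pending table is an eighth of that */
	if (hash_size < 64)
		hash_size = 64;
	printf("pending table buckets:   %lu\n", hash_size);	/* 16384 */
	return 0;
}
```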
1da177e4 LT |
720 | /* |
721 | * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> | |
722 | */ | |
723 | static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
724 | { | |
725 | struct dm_snapshot *s; | |
cd45daff | 726 | int i; |
1da177e4 | 727 | int r = -EINVAL; |
fc56f6fb | 728 | char *origin_path, *cow_path; |
fee1998e | 729 | unsigned args_used; |
1da177e4 | 730 | |
4c7e3bf4 | 731 | if (argc != 4) { |
72d94861 | 732 | ti->error = "requires exactly 4 arguments"; |
1da177e4 | 733 | r = -EINVAL; |
fc56f6fb | 734 | goto bad; |
1da177e4 LT |
735 | } |
736 | ||
737 | origin_path = argv[0]; | |
fee1998e JB |
738 | argv++; |
739 | argc--; | |
1da177e4 | 740 | |
fc56f6fb MS |
741 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
742 | if (!s) { | |
743 | ti->error = "Cannot allocate snapshot context private " | |
744 | "structure"; | |
745 | r = -ENOMEM; | |
746 | goto bad; | |
747 | } | |
748 | ||
749 | cow_path = argv[0]; | |
750 | argv++; | |
751 | argc--; | |
752 | ||
753 | r = dm_get_device(ti, cow_path, 0, 0, | |
754 | FMODE_READ | FMODE_WRITE, &s->cow); | |
755 | if (r) { | |
756 | ti->error = "Cannot get COW device"; | |
757 | goto bad_cow; | |
758 | } | |
759 | ||
760 | r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); | |
fee1998e JB |
761 | if (r) { |
762 | ti->error = "Couldn't create exception store"; | |
1da177e4 | 763 | r = -EINVAL; |
fc56f6fb | 764 | goto bad_store; |
1da177e4 LT |
765 | } |
766 | ||
fee1998e JB |
767 | argv += args_used; |
768 | argc -= args_used; | |
769 | ||
1da177e4 LT |
770 | r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin); |
771 | if (r) { | |
772 | ti->error = "Cannot get origin device"; | |
fee1998e | 773 | goto bad_origin; |
1da177e4 LT |
774 | } |
775 | ||
fc56f6fb | 776 | s->ti = ti; |
1da177e4 | 777 | s->valid = 1; |
aa14edeb | 778 | s->active = 0; |
c26655ca | 779 | s->suspended = 0; |
879129d2 | 780 | atomic_set(&s->pending_exceptions_count, 0); |
1da177e4 | 781 | init_rwsem(&s->lock); |
c1f0c183 | 782 | INIT_LIST_HEAD(&s->list); |
ca3a931f | 783 | spin_lock_init(&s->pe_lock); |
1da177e4 LT |
784 | |
785 | /* Allocate hash table for COW data */ | |
fee1998e | 786 | if (init_hash_tables(s)) { |
1da177e4 LT |
787 | ti->error = "Unable to allocate hash table space"; |
788 | r = -ENOMEM; | |
fee1998e | 789 | goto bad_hash_tables; |
1da177e4 LT |
790 | } |
791 | ||
eb69aca5 | 792 | r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client); |
1da177e4 LT |
793 | if (r) { |
794 | ti->error = "Could not create kcopyd client"; | |
fee1998e | 795 | goto bad_kcopyd; |
1da177e4 LT |
796 | } |
797 | ||
92e86812 MP |
798 | s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); |
799 | if (!s->pending_pool) { | |
800 | ti->error = "Could not allocate mempool for pending exceptions"; | |
fee1998e | 801 | goto bad_pending_pool; |
92e86812 MP |
802 | } |
803 | ||
cd45daff MP |
804 | s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS, |
805 | tracked_chunk_cache); | |
806 | if (!s->tracked_chunk_pool) { | |
807 | ti->error = "Could not allocate tracked_chunk mempool for " | |
808 | "tracking reads"; | |
92e86812 | 809 | goto bad_tracked_chunk_pool; |
cd45daff MP |
810 | } |
811 | ||
812 | for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) | |
813 | INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); | |
814 | ||
815 | spin_lock_init(&s->tracked_chunk_lock); | |
816 | ||
c1f0c183 MS |
817 | bio_list_init(&s->queued_bios); |
818 | INIT_WORK(&s->queued_bios_work, flush_queued_bios); | |
819 | ||
820 | ti->private = s; | |
821 | ti->num_flush_requests = 1; | |
822 | ||
823 | /* Add snapshot to the list of snapshots for this origin */ | |
824 | /* Exceptions aren't triggered till snapshot_resume() is called */ | |
825 | r = register_snapshot(s); | |
826 | if (r == -ENOMEM) { | |
827 | ti->error = "Snapshot origin struct allocation failed"; | |
828 | goto bad_load_and_register; | |
829 | } else if (r < 0) { | |
830 | /* invalid handover, register_snapshot has set ti->error */ | |
831 | goto bad_load_and_register; | |
832 | } | |
833 | ||
834 | /* | |
835 | * Metadata must only be loaded into one table at once, so skip this | |
836 | * if metadata will be handed over during resume. | |
837 | * Chunk size will be set during the handover - set it to zero to | |
838 | * ensure it's ignored. | |
839 | */ | |
840 | if (r > 0) { | |
841 | s->store->chunk_size = 0; | |
842 | return 0; | |
843 | } | |
844 | ||
493df71c JB |
845 | r = s->store->type->read_metadata(s->store, dm_add_exception, |
846 | (void *)s); | |
0764147b | 847 | if (r < 0) { |
f9cea4f7 | 848 | ti->error = "Failed to read snapshot metadata"; |
c1f0c183 | 849 | goto bad_read_metadata; |
0764147b MB |
850 | } else if (r > 0) { |
851 | s->valid = 0; | |
852 | DMWARN("Snapshot is marked invalid."); | |
f9cea4f7 | 853 | } |
aa14edeb | 854 | |
3f2412dc MP |
855 | if (!s->store->chunk_size) { |
856 | ti->error = "Chunk size not set"; | |
c1f0c183 | 857 | goto bad_read_metadata; |
1da177e4 | 858 | } |
d0216849 | 859 | ti->split_io = s->store->chunk_size; |
1da177e4 LT |
860 | |
861 | return 0; | |
862 | ||
c1f0c183 MS |
863 | bad_read_metadata: |
864 | unregister_snapshot(s); | |
865 | ||
fee1998e | 866 | bad_load_and_register: |
cd45daff MP |
867 | mempool_destroy(s->tracked_chunk_pool); |
868 | ||
fee1998e | 869 | bad_tracked_chunk_pool: |
92e86812 MP |
870 | mempool_destroy(s->pending_pool); |
871 | ||
fee1998e | 872 | bad_pending_pool: |
eb69aca5 | 873 | dm_kcopyd_client_destroy(s->kcopyd_client); |
1da177e4 | 874 | |
fee1998e | 875 | bad_kcopyd: |
3510cb94 JB |
876 | dm_exception_table_exit(&s->pending, pending_cache); |
877 | dm_exception_table_exit(&s->complete, exception_cache); | |
1da177e4 | 878 | |
fee1998e | 879 | bad_hash_tables: |
1da177e4 LT |
880 | dm_put_device(ti, s->origin); |
881 | ||
fee1998e | 882 | bad_origin: |
fc56f6fb | 883 | dm_exception_store_destroy(s->store); |
1da177e4 | 884 | |
fc56f6fb MS |
885 | bad_store: |
886 | dm_put_device(ti, s->cow); | |
fee1998e | 887 | |
fc56f6fb MS |
888 | bad_cow: |
889 | kfree(s); | |
890 | ||
891 | bad: | |
1da177e4 LT |
892 | return r; |
893 | } | |
894 | ||
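For orientation, this is roughly how such a target is driven from userspace; the device names and sizes below are invented, and only the argument order after "snapshot" is taken from the constructor comment (<origin_dev> <COW-dev> <p/n> <chunk-size>) together with the snapshot-origin target defined later in this file.

```c
/*
 * Illustrative userspace usage (not part of the kernel source):
 *
 *   # 1 GiB origin, persistent exception store, 16-sector (8 KiB) chunks
 *   dmsetup create snap0 --table \
 *       "0 2097152 snapshot /dev/vg0/base /dev/vg0/base-cow P 16"
 *
 *   # origin target over the same device so writes trigger copy-out
 *   dmsetup create base-origin --table \
 *       "0 2097152 snapshot-origin /dev/vg0/base"
 */
```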
31c93a0c MB |
895 | static void __free_exceptions(struct dm_snapshot *s) |
896 | { | |
eb69aca5 | 897 | dm_kcopyd_client_destroy(s->kcopyd_client); |
31c93a0c MB |
898 | s->kcopyd_client = NULL; |
899 | ||
3510cb94 JB |
900 | dm_exception_table_exit(&s->pending, pending_cache); |
901 | dm_exception_table_exit(&s->complete, exception_cache); | |
31c93a0c MB |
902 | } |
903 | ||
c1f0c183 MS |
904 | static void __handover_exceptions(struct dm_snapshot *snap_src, |
905 | struct dm_snapshot *snap_dest) | |
906 | { | |
907 | union { | |
908 | struct dm_exception_table table_swap; | |
909 | struct dm_exception_store *store_swap; | |
910 | } u; | |
911 | ||
912 | /* | |
913 | * Swap all snapshot context information between the two instances. | |
914 | */ | |
915 | u.table_swap = snap_dest->complete; | |
916 | snap_dest->complete = snap_src->complete; | |
917 | snap_src->complete = u.table_swap; | |
918 | ||
919 | u.store_swap = snap_dest->store; | |
920 | snap_dest->store = snap_src->store; | |
921 | snap_src->store = u.store_swap; | |
922 | ||
923 | snap_dest->store->snap = snap_dest; | |
924 | snap_src->store->snap = snap_src; | |
925 | ||
926 | snap_dest->ti->split_io = snap_dest->store->chunk_size; | |
927 | snap_dest->valid = snap_src->valid; | |
928 | ||
929 | /* | |
930 | * Set source invalid to ensure it receives no further I/O. | |
931 | */ | |
932 | snap_src->valid = 0; | |
933 | } | |
934 | ||
1da177e4 LT |
935 | static void snapshot_dtr(struct dm_target *ti) |
936 | { | |
cd45daff MP |
937 | #ifdef CONFIG_DM_DEBUG |
938 | int i; | |
939 | #endif | |
028867ac | 940 | struct dm_snapshot *s = ti->private; |
c1f0c183 | 941 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; |
1da177e4 | 942 | |
ca3a931f AK |
943 | flush_workqueue(ksnapd); |
944 | ||
c1f0c183 MS |
945 | down_read(&_origins_lock); |
946 | /* Check whether exception handover must be cancelled */ | |
947 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest); | |
948 | if (snap_src && snap_dest && (s == snap_src)) { | |
949 | down_write(&snap_dest->lock); | |
950 | snap_dest->valid = 0; | |
951 | up_write(&snap_dest->lock); | |
952 | DMERR("Cancelling snapshot handover."); | |
953 | } | |
954 | up_read(&_origins_lock); | |
955 | ||
138728dc AK |
956 | /* Prevent further origin writes from using this snapshot. */ |
957 | /* After this returns there can be no new kcopyd jobs. */ | |
1da177e4 LT |
958 | unregister_snapshot(s); |
959 | ||
879129d2 | 960 | while (atomic_read(&s->pending_exceptions_count)) |
90fa1527 | 961 | msleep(1); |
879129d2 MP |
962 | /* |
963 | * Ensure instructions in mempool_destroy aren't reordered | |
964 | * before atomic_read. | |
965 | */ | |
966 | smp_mb(); | |
967 | ||
cd45daff MP |
968 | #ifdef CONFIG_DM_DEBUG |
969 | for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) | |
970 | BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); | |
971 | #endif | |
972 | ||
973 | mempool_destroy(s->tracked_chunk_pool); | |
974 | ||
31c93a0c | 975 | __free_exceptions(s); |
1da177e4 | 976 | |
92e86812 MP |
977 | mempool_destroy(s->pending_pool); |
978 | ||
1da177e4 | 979 | dm_put_device(ti, s->origin); |
fee1998e JB |
980 | |
981 | dm_exception_store_destroy(s->store); | |
138728dc | 982 | |
fc56f6fb MS |
983 | dm_put_device(ti, s->cow); |
984 | ||
1da177e4 LT |
985 | kfree(s); |
986 | } | |
987 | ||
988 | /* | |
989 | * Flush a list of buffers. | |
990 | */ | |
991 | static void flush_bios(struct bio *bio) | |
992 | { | |
993 | struct bio *n; | |
994 | ||
995 | while (bio) { | |
996 | n = bio->bi_next; | |
997 | bio->bi_next = NULL; | |
998 | generic_make_request(bio); | |
999 | bio = n; | |
1000 | } | |
1001 | } | |
1002 | ||
c4028958 | 1003 | static void flush_queued_bios(struct work_struct *work) |
ca3a931f | 1004 | { |
c4028958 DH |
1005 | struct dm_snapshot *s = |
1006 | container_of(work, struct dm_snapshot, queued_bios_work); | |
ca3a931f AK |
1007 | struct bio *queued_bios; |
1008 | unsigned long flags; | |
1009 | ||
1010 | spin_lock_irqsave(&s->pe_lock, flags); | |
1011 | queued_bios = bio_list_get(&s->queued_bios); | |
1012 | spin_unlock_irqrestore(&s->pe_lock, flags); | |
1013 | ||
1014 | flush_bios(queued_bios); | |
1015 | } | |
1016 | ||
1da177e4 LT |
1017 | /* |
1018 | * Error a list of buffers. | |
1019 | */ | |
1020 | static void error_bios(struct bio *bio) | |
1021 | { | |
1022 | struct bio *n; | |
1023 | ||
1024 | while (bio) { | |
1025 | n = bio->bi_next; | |
1026 | bio->bi_next = NULL; | |
6712ecf8 | 1027 | bio_io_error(bio); |
1da177e4 LT |
1028 | bio = n; |
1029 | } | |
1030 | } | |
1031 | ||
695368ac | 1032 | static void __invalidate_snapshot(struct dm_snapshot *s, int err) |
76df1c65 AK |
1033 | { |
1034 | if (!s->valid) | |
1035 | return; | |
1036 | ||
1037 | if (err == -EIO) | |
1038 | DMERR("Invalidating snapshot: Error reading/writing."); | |
1039 | else if (err == -ENOMEM) | |
1040 | DMERR("Invalidating snapshot: Unable to allocate exception."); | |
1041 | ||
493df71c JB |
1042 | if (s->store->type->drop_snapshot) |
1043 | s->store->type->drop_snapshot(s->store); | |
76df1c65 AK |
1044 | |
1045 | s->valid = 0; | |
1046 | ||
fc56f6fb | 1047 | dm_table_event(s->ti->table); |
76df1c65 AK |
1048 | } |
1049 | ||
028867ac | 1050 | static void get_pending_exception(struct dm_snap_pending_exception *pe) |
4b832e8d AK |
1051 | { |
1052 | atomic_inc(&pe->ref_count); | |
1053 | } | |
1054 | ||
028867ac | 1055 | static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe) |
4b832e8d | 1056 | { |
028867ac | 1057 | struct dm_snap_pending_exception *primary_pe; |
4b832e8d AK |
1058 | struct bio *origin_bios = NULL; |
1059 | ||
1060 | primary_pe = pe->primary_pe; | |
1061 | ||
1062 | /* | |
1063 | * If this pe is involved in a write to the origin and | |
1064 | * it is the last sibling to complete then release | |
1065 | * the bios for the original write to the origin. | |
1066 | */ | |
1067 | if (primary_pe && | |
7c5f78b9 | 1068 | atomic_dec_and_test(&primary_pe->ref_count)) { |
4b832e8d | 1069 | origin_bios = bio_list_get(&primary_pe->origin_bios); |
7c5f78b9 MP |
1070 | free_pending_exception(primary_pe); |
1071 | } | |
4b832e8d AK |
1072 | |
1073 | /* | |
1074 | * Free the pe if it's not linked to an origin write or if | |
1075 | * it's not itself a primary pe. | |
1076 | */ | |
1077 | if (!primary_pe || primary_pe != pe) | |
1078 | free_pending_exception(pe); | |
1079 | ||
4b832e8d AK |
1080 | return origin_bios; |
1081 | } | |
1082 | ||
028867ac | 1083 | static void pending_complete(struct dm_snap_pending_exception *pe, int success) |
1da177e4 | 1084 | { |
1d4989c8 | 1085 | struct dm_exception *e; |
1da177e4 | 1086 | struct dm_snapshot *s = pe->snap; |
9d493fa8 AK |
1087 | struct bio *origin_bios = NULL; |
1088 | struct bio *snapshot_bios = NULL; | |
1089 | int error = 0; | |
1da177e4 | 1090 | |
76df1c65 AK |
1091 | if (!success) { |
1092 | /* Read/write error - snapshot is unusable */ | |
1da177e4 | 1093 | down_write(&s->lock); |
695368ac | 1094 | __invalidate_snapshot(s, -EIO); |
9d493fa8 | 1095 | error = 1; |
76df1c65 AK |
1096 | goto out; |
1097 | } | |
1098 | ||
3510cb94 | 1099 | e = alloc_completed_exception(); |
76df1c65 | 1100 | if (!e) { |
1da177e4 | 1101 | down_write(&s->lock); |
695368ac | 1102 | __invalidate_snapshot(s, -ENOMEM); |
9d493fa8 | 1103 | error = 1; |
76df1c65 AK |
1104 | goto out; |
1105 | } | |
1106 | *e = pe->e; | |
1da177e4 | 1107 | |
76df1c65 AK |
1108 | down_write(&s->lock); |
1109 | if (!s->valid) { | |
3510cb94 | 1110 | free_completed_exception(e); |
9d493fa8 | 1111 | error = 1; |
76df1c65 | 1112 | goto out; |
1da177e4 LT |
1113 | } |
1114 | ||
615d1eb9 MS |
1115 | /* Check for conflicting reads */ |
1116 | __check_for_conflicting_io(s, pe->e.old_chunk); | |
a8d41b59 | 1117 | |
9d493fa8 AK |
1118 | /* |
1119 | * Add a proper exception, and remove the | |
1120 | * in-flight exception from the list. | |
1121 | */ | |
3510cb94 | 1122 | dm_insert_exception(&s->complete, e); |
76df1c65 | 1123 | |
1da177e4 | 1124 | out: |
3510cb94 | 1125 | dm_remove_exception(&pe->e); |
9d493fa8 | 1126 | snapshot_bios = bio_list_get(&pe->snapshot_bios); |
4b832e8d | 1127 | origin_bios = put_pending_exception(pe); |
1da177e4 | 1128 | |
9d493fa8 AK |
1129 | up_write(&s->lock); |
1130 | ||
1131 | /* Submit any pending write bios */ | |
1132 | if (error) | |
1133 | error_bios(snapshot_bios); | |
1134 | else | |
1135 | flush_bios(snapshot_bios); | |
1136 | ||
1137 | flush_bios(origin_bios); | |
1da177e4 LT |
1138 | } |
1139 | ||
1140 | static void commit_callback(void *context, int success) | |
1141 | { | |
028867ac AK |
1142 | struct dm_snap_pending_exception *pe = context; |
1143 | ||
1da177e4 LT |
1144 | pending_complete(pe, success); |
1145 | } | |
1146 | ||
1147 | /* | |
1148 | * Called when the copy I/O has finished. kcopyd actually runs | |
1149 | * this code so don't block. | |
1150 | */ | |
4cdc1d1f | 1151 | static void copy_callback(int read_err, unsigned long write_err, void *context) |
1da177e4 | 1152 | { |
028867ac | 1153 | struct dm_snap_pending_exception *pe = context; |
1da177e4 LT |
1154 | struct dm_snapshot *s = pe->snap; |
1155 | ||
1156 | if (read_err || write_err) | |
1157 | pending_complete(pe, 0); | |
1158 | ||
1159 | else | |
1160 | /* Update the metadata if we are persistent */ | |
493df71c JB |
1161 | s->store->type->commit_exception(s->store, &pe->e, |
1162 | commit_callback, pe); | |
1da177e4 LT |
1163 | } |
1164 | ||
1165 | /* | |
1166 | * Dispatches the copy operation to kcopyd. | |
1167 | */ | |
028867ac | 1168 | static void start_copy(struct dm_snap_pending_exception *pe) |
1da177e4 LT |
1169 | { |
1170 | struct dm_snapshot *s = pe->snap; | |
22a1ceb1 | 1171 | struct dm_io_region src, dest; |
1da177e4 LT |
1172 | struct block_device *bdev = s->origin->bdev; |
1173 | sector_t dev_size; | |
1174 | ||
1175 | dev_size = get_dev_size(bdev); | |
1176 | ||
1177 | src.bdev = bdev; | |
71fab00a | 1178 | src.sector = chunk_to_sector(s->store, pe->e.old_chunk); |
df96eee6 | 1179 | src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); |
1da177e4 | 1180 | |
fc56f6fb | 1181 | dest.bdev = s->cow->bdev; |
71fab00a | 1182 | dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); |
1da177e4 LT |
1183 | dest.count = src.count; |
1184 | ||
1185 | /* Hand over to kcopyd */ | |
eb69aca5 | 1186 | dm_kcopyd_copy(s->kcopyd_client, |
1da177e4 LT |
1187 | &src, 1, &dest, 0, copy_callback, pe); |
1188 | } | |
1189 | ||
2913808e MP |
1190 | static struct dm_snap_pending_exception * |
1191 | __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) | |
1192 | { | |
3510cb94 | 1193 | struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); |
2913808e MP |
1194 | |
1195 | if (!e) | |
1196 | return NULL; | |
1197 | ||
1198 | return container_of(e, struct dm_snap_pending_exception, e); | |
1199 | } | |
1200 | ||
1da177e4 LT |
1201 | /* |
1202 | * Looks to see if this snapshot already has a pending exception | |
1203 | * for this chunk, otherwise it allocates a new one and inserts | |
1204 | * it into the pending table. | |
1205 | * | |
1206 | * NOTE: a write lock must be held on snap->lock before calling | |
1207 | * this. | |
1208 | */ | |
028867ac | 1209 | static struct dm_snap_pending_exception * |
c6621392 MP |
1210 | __find_pending_exception(struct dm_snapshot *s, |
1211 | struct dm_snap_pending_exception *pe, chunk_t chunk) | |
1da177e4 | 1212 | { |
c6621392 | 1213 | struct dm_snap_pending_exception *pe2; |
1da177e4 | 1214 | |
2913808e MP |
1215 | pe2 = __lookup_pending_exception(s, chunk); |
1216 | if (pe2) { | |
76df1c65 | 1217 | free_pending_exception(pe); |
2913808e | 1218 | return pe2; |
1da177e4 LT |
1219 | } |
1220 | ||
76df1c65 AK |
1221 | pe->e.old_chunk = chunk; |
1222 | bio_list_init(&pe->origin_bios); | |
1223 | bio_list_init(&pe->snapshot_bios); | |
1224 | pe->primary_pe = NULL; | |
4b832e8d | 1225 | atomic_set(&pe->ref_count, 0); |
76df1c65 AK |
1226 | pe->started = 0; |
1227 | ||
493df71c | 1228 | if (s->store->type->prepare_exception(s->store, &pe->e)) { |
76df1c65 AK |
1229 | free_pending_exception(pe); |
1230 | return NULL; | |
1231 | } | |
1232 | ||
4b832e8d | 1233 | get_pending_exception(pe); |
3510cb94 | 1234 | dm_insert_exception(&s->pending, &pe->e); |
76df1c65 | 1235 | |
1da177e4 LT |
1236 | return pe; |
1237 | } | |
1238 | ||
1d4989c8 | 1239 | static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, |
d74f81f8 | 1240 | struct bio *bio, chunk_t chunk) |
1da177e4 | 1241 | { |
fc56f6fb | 1242 | bio->bi_bdev = s->cow->bdev; |
71fab00a JB |
1243 | bio->bi_sector = chunk_to_sector(s->store, |
1244 | dm_chunk_number(e->new_chunk) + | |
1245 | (chunk - e->old_chunk)) + | |
1246 | (bio->bi_sector & | |
1247 | s->store->chunk_mask); | |
1da177e4 LT |
1248 | } |
1249 | ||
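A worked example of the remap arithmetic above, as a standalone sketch; the chunk geometry and chunk numbers are assumptions chosen so the values are easy to follow.

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned chunk_shift = 4;			/* 16-sector chunks */
	uint64_t chunk_mask  = (1ULL << chunk_shift) - 1;

	uint64_t old_chunk = 100, new_chunk = 7;	/* one stored exception */
	uint64_t bi_sector = 1605;			/* bio sector on the snapshot */

	uint64_t chunk  = bi_sector >> chunk_shift;	/* sector_to_chunk(): 100 */
	uint64_t offset = bi_sector & chunk_mask;	/* 5 sectors into the chunk */

	/* chunk_to_sector(new_chunk + (chunk - old_chunk)) + offset */
	uint64_t cow_sector = ((new_chunk + (chunk - old_chunk)) << chunk_shift) + offset;

	printf("origin sector %llu -> COW sector %llu\n",
	       (unsigned long long)bi_sector,
	       (unsigned long long)cow_sector);		/* 1605 -> 117 */
	return 0;
}
```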
1250 | static int snapshot_map(struct dm_target *ti, struct bio *bio, | |
1251 | union map_info *map_context) | |
1252 | { | |
1d4989c8 | 1253 | struct dm_exception *e; |
028867ac | 1254 | struct dm_snapshot *s = ti->private; |
d2a7ad29 | 1255 | int r = DM_MAPIO_REMAPPED; |
1da177e4 | 1256 | chunk_t chunk; |
028867ac | 1257 | struct dm_snap_pending_exception *pe = NULL; |
1da177e4 | 1258 | |
494b3ee7 | 1259 | if (unlikely(bio_empty_barrier(bio))) { |
fc56f6fb | 1260 | bio->bi_bdev = s->cow->bdev; |
494b3ee7 MP |
1261 | return DM_MAPIO_REMAPPED; |
1262 | } | |
1263 | ||
71fab00a | 1264 | chunk = sector_to_chunk(s->store, bio->bi_sector); |
1da177e4 LT |
1265 | |
1266 | /* Full snapshots are not usable */ | |
76df1c65 | 1267 | /* To get here the table must be live so s->active is always set. */ |
1da177e4 | 1268 | if (!s->valid) |
f6a80ea8 | 1269 | return -EIO; |
1da177e4 | 1270 | |
ba40a2aa AK |
1271 | /* FIXME: should only take write lock if we need |
1272 | * to copy an exception */ | |
1273 | down_write(&s->lock); | |
1274 | ||
1275 | if (!s->valid) { | |
1276 | r = -EIO; | |
1277 | goto out_unlock; | |
1278 | } | |
1279 | ||
1280 | /* If the block is already remapped - use that, else remap it */ | |
3510cb94 | 1281 | e = dm_lookup_exception(&s->complete, chunk); |
ba40a2aa | 1282 | if (e) { |
d74f81f8 | 1283 | remap_exception(s, e, bio, chunk); |
ba40a2aa AK |
1284 | goto out_unlock; |
1285 | } | |
1286 | ||
1da177e4 LT |
1287 | /* |
1288 | * Write to snapshot - higher level takes care of RW/RO | |
1289 | * flags so we should only get this if we are | |
1290 | * writeable. | |
1291 | */ | |
1292 | if (bio_rw(bio) == WRITE) { | |
2913808e | 1293 | pe = __lookup_pending_exception(s, chunk); |
76df1c65 | 1294 | if (!pe) { |
c6621392 MP |
1295 | up_write(&s->lock); |
1296 | pe = alloc_pending_exception(s); | |
1297 | down_write(&s->lock); | |
1298 | ||
1299 | if (!s->valid) { | |
1300 | free_pending_exception(pe); | |
1301 | r = -EIO; | |
1302 | goto out_unlock; | |
1303 | } | |
1304 | ||
3510cb94 | 1305 | e = dm_lookup_exception(&s->complete, chunk); |
35bf659b MP |
1306 | if (e) { |
1307 | free_pending_exception(pe); | |
1308 | remap_exception(s, e, bio, chunk); | |
1309 | goto out_unlock; | |
1310 | } | |
1311 | ||
c6621392 | 1312 | pe = __find_pending_exception(s, pe, chunk); |
2913808e MP |
1313 | if (!pe) { |
1314 | __invalidate_snapshot(s, -ENOMEM); | |
1315 | r = -EIO; | |
1316 | goto out_unlock; | |
1317 | } | |
1da177e4 LT |
1318 | } |
1319 | ||
d74f81f8 | 1320 | remap_exception(s, &pe->e, bio, chunk); |
76df1c65 AK |
1321 | bio_list_add(&pe->snapshot_bios, bio); |
1322 | ||
d2a7ad29 | 1323 | r = DM_MAPIO_SUBMITTED; |
ba40a2aa | 1324 | |
76df1c65 AK |
1325 | if (!pe->started) { |
1326 | /* this is protected by snap->lock */ | |
1327 | pe->started = 1; | |
ba40a2aa | 1328 | up_write(&s->lock); |
76df1c65 | 1329 | start_copy(pe); |
ba40a2aa AK |
1330 | goto out; |
1331 | } | |
cd45daff | 1332 | } else { |
ba40a2aa | 1333 | bio->bi_bdev = s->origin->bdev; |
cd45daff MP |
1334 | map_context->ptr = track_chunk(s, chunk); |
1335 | } | |
1da177e4 | 1336 | |
ba40a2aa AK |
1337 | out_unlock: |
1338 | up_write(&s->lock); | |
1339 | out: | |
1da177e4 LT |
1340 | return r; |
1341 | } | |
1342 | ||
cd45daff MP |
1343 | static int snapshot_end_io(struct dm_target *ti, struct bio *bio, |
1344 | int error, union map_info *map_context) | |
1345 | { | |
1346 | struct dm_snapshot *s = ti->private; | |
1347 | struct dm_snap_tracked_chunk *c = map_context->ptr; | |
1348 | ||
1349 | if (c) | |
1350 | stop_tracking_chunk(s, c); | |
1351 | ||
1352 | return 0; | |
1353 | } | |
1354 | ||
c26655ca MS |
1355 | static void snapshot_postsuspend(struct dm_target *ti) |
1356 | { | |
1357 | struct dm_snapshot *s = ti->private; | |
1358 | ||
1359 | down_write(&s->lock); | |
1360 | s->suspended = 1; | |
1361 | up_write(&s->lock); | |
1362 | } | |
1363 | ||
c1f0c183 MS |
1364 | static int snapshot_preresume(struct dm_target *ti) |
1365 | { | |
1366 | int r = 0; | |
1367 | struct dm_snapshot *s = ti->private; | |
1368 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | |
1369 | ||
1370 | down_read(&_origins_lock); | |
1371 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest); | |
1372 | if (snap_src && snap_dest) { | |
1373 | down_read(&snap_src->lock); | |
1374 | if (s == snap_src) { | |
1375 | DMERR("Unable to resume snapshot source until " | |
1376 | "handover completes."); | |
1377 | r = -EINVAL; | |
1378 | } else if (!snap_src->suspended) { | |
1379 | DMERR("Unable to perform snapshot handover until " | |
1380 | "source is suspended."); | |
1381 | r = -EINVAL; | |
1382 | } | |
1383 | up_read(&snap_src->lock); | |
1384 | } | |
1385 | up_read(&_origins_lock); | |
1386 | ||
1387 | return r; | |
1388 | } | |
1389 | ||
1da177e4 LT |
1390 | static void snapshot_resume(struct dm_target *ti) |
1391 | { | |
028867ac | 1392 | struct dm_snapshot *s = ti->private; |
c1f0c183 MS |
1393 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; |
1394 | ||
1395 | down_read(&_origins_lock); | |
1396 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest); | |
1397 | if (snap_src && snap_dest) { | |
1398 | down_write(&snap_src->lock); | |
1399 | down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING); | |
1400 | __handover_exceptions(snap_src, snap_dest); | |
1401 | up_write(&snap_dest->lock); | |
1402 | up_write(&snap_src->lock); | |
1403 | } | |
1404 | up_read(&_origins_lock); | |
1405 | ||
1406 | /* Now we have correct chunk size, reregister */ | |
1407 | reregister_snapshot(s); | |
1da177e4 | 1408 | |
aa14edeb AK |
1409 | down_write(&s->lock); |
1410 | s->active = 1; | |
c26655ca | 1411 | s->suspended = 0; |
aa14edeb | 1412 | up_write(&s->lock); |
1da177e4 LT |
1413 | } |
1414 | ||
1415 | static int snapshot_status(struct dm_target *ti, status_type_t type, | |
1416 | char *result, unsigned int maxlen) | |
1417 | { | |
2e4a31df | 1418 | unsigned sz = 0; |
028867ac | 1419 | struct dm_snapshot *snap = ti->private; |
1da177e4 LT |
1420 | |
1421 | switch (type) { | |
1422 | case STATUSTYPE_INFO: | |
94e76572 MP |
1423 | |
1424 | down_write(&snap->lock); | |
1425 | ||
1da177e4 | 1426 | if (!snap->valid) |
2e4a31df | 1427 | DMEMIT("Invalid"); |
1da177e4 | 1428 | else { |
985903bb MS |
1429 | if (snap->store->type->usage) { |
1430 | sector_t total_sectors, sectors_allocated, | |
1431 | metadata_sectors; | |
1432 | snap->store->type->usage(snap->store, | |
1433 | &total_sectors, | |
1434 | &sectors_allocated, | |
1435 | &metadata_sectors); | |
1436 | DMEMIT("%llu/%llu %llu", | |
1437 | (unsigned long long)sectors_allocated, | |
1438 | (unsigned long long)total_sectors, | |
1439 | (unsigned long long)metadata_sectors); | |
1da177e4 LT |
1440 | } |
1441 | else | |
2e4a31df | 1442 | DMEMIT("Unknown"); |
1da177e4 | 1443 | } |
94e76572 MP |
1444 | |
1445 | up_write(&snap->lock); | |
1446 | ||
1da177e4 LT |
1447 | break; |
1448 | ||
1449 | case STATUSTYPE_TABLE: | |
1450 | /* | |
1451 | * kdevname returns a static pointer so we need | |
1452 | * to make private copies if the output is to | |
1453 | * make sense. | |
1454 | */ | |
fc56f6fb | 1455 | DMEMIT("%s %s", snap->origin->name, snap->cow->name); |
1e302a92 JB |
1456 | snap->store->type->status(snap->store, type, result + sz, |
1457 | maxlen - sz); | |
1da177e4 LT |
1458 | break; |
1459 | } | |
1460 | ||
1461 | return 0; | |
1462 | } | |
1463 | ||
8811f46c MS |
1464 | static int snapshot_iterate_devices(struct dm_target *ti, |
1465 | iterate_devices_callout_fn fn, void *data) | |
1466 | { | |
1467 | struct dm_snapshot *snap = ti->private; | |
1468 | ||
1469 | return fn(ti, snap->origin, 0, ti->len, data); | |
1470 | } | |
1471 | ||
1472 | ||
1da177e4 LT |
1473 | /*----------------------------------------------------------------- |
1474 | * Origin methods | |
1475 | *---------------------------------------------------------------*/ | |
9eaae8ff MP |
1476 | |
1477 | /* | |
1478 | * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any | |
1479 | * supplied bio was ignored. The caller may submit it immediately. | |
1480 | * (No remapping actually occurs as the origin is always a direct linear | |
1481 | * map.) | |
1482 | * | |
1483 | * If further exceptions are required, DM_MAPIO_SUBMITTED is returned | |
1484 | * and any supplied bio is added to a list to be submitted once all | |
1485 | * the necessary exceptions exist. | |
1486 | */ | |
1487 | static int __origin_write(struct list_head *snapshots, sector_t sector, | |
1488 | struct bio *bio) | |
1da177e4 | 1489 | { |
d2a7ad29 | 1490 | int r = DM_MAPIO_REMAPPED, first = 0; |
1da177e4 | 1491 | struct dm_snapshot *snap; |
1d4989c8 | 1492 | struct dm_exception *e; |
028867ac | 1493 | struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL; |
1da177e4 | 1494 | chunk_t chunk; |
eccf0817 | 1495 | LIST_HEAD(pe_queue); |
1da177e4 LT |
1496 | |
1497 | /* Do all the snapshots on this origin */ | |
1498 | list_for_each_entry (snap, snapshots, list) { | |
1499 | ||
76df1c65 AK |
1500 | down_write(&snap->lock); |
1501 | ||
aa14edeb AK |
1502 | /* Only deal with valid and active snapshots */ |
1503 | if (!snap->valid || !snap->active) | |
76df1c65 | 1504 | goto next_snapshot; |
1da177e4 | 1505 | |
d5e404c1 | 1506 | /* Nothing to do if writing beyond end of snapshot */ |
9eaae8ff | 1507 | if (sector >= dm_table_get_size(snap->ti->table)) |
76df1c65 | 1508 | goto next_snapshot; |
1da177e4 LT |
1509 | |
1510 | /* | |
1511 | * Remember, different snapshots can have | |
1512 | * different chunk sizes. | |
1513 | */ | |
9eaae8ff | 1514 | chunk = sector_to_chunk(snap->store, sector); |
1da177e4 LT |
1515 | |
1516 | /* | |
1517 | * Check exception table to see if block | |
1518 | * is already remapped in this snapshot | |
1519 | * and trigger an exception if not. | |
b4b610f6 | 1520 | * |
4b832e8d | 1521 | * ref_count is initialised to 1 so pending_complete() |
b4b610f6 | 1522 | * won't destroy the primary_pe while we're inside this loop. |
1da177e4 | 1523 | */ |
3510cb94 | 1524 | e = dm_lookup_exception(&snap->complete, chunk); |
76df1c65 AK |
1525 | if (e) |
1526 | goto next_snapshot; | |
1527 | ||
2913808e | 1528 | pe = __lookup_pending_exception(snap, chunk); |
76df1c65 | 1529 | if (!pe) { |
c6621392 MP |
1530 | up_write(&snap->lock); |
1531 | pe = alloc_pending_exception(snap); | |
1532 | down_write(&snap->lock); | |
1533 | ||
1534 | if (!snap->valid) { | |
1535 | free_pending_exception(pe); | |
1536 | goto next_snapshot; | |
1537 | } | |
1538 | ||
3510cb94 | 1539 | e = dm_lookup_exception(&snap->complete, chunk); |
35bf659b MP |
1540 | if (e) { |
1541 | free_pending_exception(pe); | |
1542 | goto next_snapshot; | |
1543 | } | |
1544 | ||
c6621392 | 1545 | pe = __find_pending_exception(snap, pe, chunk); |
2913808e MP |
1546 | if (!pe) { |
1547 | __invalidate_snapshot(snap, -ENOMEM); | |
1548 | goto next_snapshot; | |
1549 | } | |
76df1c65 AK |
1550 | } |
1551 | ||
1552 | if (!primary_pe) { | |
1553 | /* | |
1554 | * Either every pe here has same | |
1555 | * primary_pe or none has one yet. | |
1556 | */ | |
1557 | if (pe->primary_pe) | |
1558 | primary_pe = pe->primary_pe; | |
1559 | else { | |
1560 | primary_pe = pe; | |
1561 | first = 1; | |
1da177e4 | 1562 | } |
76df1c65 | 1563 | |
9eaae8ff MP |
1564 | if (bio) |
1565 | bio_list_add(&primary_pe->origin_bios, bio); | |
76df1c65 | 1566 | |
d2a7ad29 | 1567 | r = DM_MAPIO_SUBMITTED; |
76df1c65 AK |
1568 | } |
1569 | ||
1570 | if (!pe->primary_pe) { | |
76df1c65 | 1571 | pe->primary_pe = primary_pe; |
4b832e8d | 1572 | get_pending_exception(primary_pe); |
76df1c65 AK |
1573 | } |
1574 | ||
1575 | if (!pe->started) { | |
1576 | pe->started = 1; | |
1577 | list_add_tail(&pe->list, &pe_queue); | |
1da177e4 LT |
1578 | } |
1579 | ||
76df1c65 | 1580 | next_snapshot: |
1da177e4 LT |
1581 | up_write(&snap->lock); |
1582 | } | |
1583 | ||
b4b610f6 | 1584 | if (!primary_pe) |
4b832e8d | 1585 | return r; |
b4b610f6 AK |
1586 | |
1587 | /* | |
1588 | * If this is the first time we're processing this chunk and | |
4b832e8d | 1589 | * ref_count is now 1 it means all the pending exceptions |
b4b610f6 AK |
1590 | * got completed while we were in the loop above, so it falls to |
1591 | * us here to remove the primary_pe and submit any origin_bios. | |
1592 | */ | |
1593 | ||
4b832e8d | 1594 | if (first && atomic_dec_and_test(&primary_pe->ref_count)) { |
b4b610f6 AK |
1595 | flush_bios(bio_list_get(&primary_pe->origin_bios)); |
1596 | free_pending_exception(primary_pe); | |
1597 | /* If we got here, pe_queue is necessarily empty. */ | |
4b832e8d | 1598 | return r; |
b4b610f6 AK |
1599 | } |
1600 | ||
1da177e4 LT |
1601 | /* |
1602 | * Now that we have a complete pe list we can start the copying. | |
1603 | */ | |
eccf0817 AK |
1604 | list_for_each_entry_safe(pe, next_pe, &pe_queue, list) |
1605 | start_copy(pe); | |
1da177e4 LT |
1606 | |
1607 | return r; | |
1608 | } | |
1609 | ||
1610 | /* | |
1611 | * Called on a write from the origin driver. | |
1612 | */ | |
1613 | static int do_origin(struct dm_dev *origin, struct bio *bio) | |
1614 | { | |
1615 | struct origin *o; | |
d2a7ad29 | 1616 | int r = DM_MAPIO_REMAPPED; |
1da177e4 LT |
1617 | |
1618 | down_read(&_origins_lock); | |
1619 | o = __lookup_origin(origin->bdev); | |
1620 | if (o) | |
9eaae8ff | 1621 | r = __origin_write(&o->snapshots, bio->bi_sector, bio); |
1da177e4 LT |
1622 | up_read(&_origins_lock); |
1623 | ||
1624 | return r; | |
1625 | } | |
1626 | ||
1627 | /* | |
1628 | * Origin: maps a linear range of a device, with hooks for snapshotting. | |
1629 | */ | |
1630 | ||
1631 | /* | |
1632 | * Construct an origin mapping: <dev_path> | |
1633 | * The context for an origin is merely a 'struct dm_dev *' | |
1634 | * pointing to the real device. | |
1635 | */ | |
1636 | static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
1637 | { | |
1638 | int r; | |
1639 | struct dm_dev *dev; | |
1640 | ||
1641 | if (argc != 1) { | |
72d94861 | 1642 | ti->error = "origin: incorrect number of arguments"; |
1da177e4 LT |
1643 | return -EINVAL; |
1644 | } | |
1645 | ||
1646 | r = dm_get_device(ti, argv[0], 0, ti->len, | |
1647 | dm_table_get_mode(ti->table), &dev); | |
1648 | if (r) { | |
1649 | ti->error = "Cannot get target device"; | |
1650 | return r; | |
1651 | } | |
1652 | ||
1653 | ti->private = dev; | |
494b3ee7 MP |
1654 | ti->num_flush_requests = 1; |
1655 | ||
1da177e4 LT |
1656 | return 0; |
1657 | } | |
1658 | ||
1659 | static void origin_dtr(struct dm_target *ti) | |
1660 | { | |
028867ac | 1661 | struct dm_dev *dev = ti->private; |
1da177e4 LT |
1662 | dm_put_device(ti, dev); |
1663 | } | |
1664 | ||
1665 | static int origin_map(struct dm_target *ti, struct bio *bio, | |
1666 | union map_info *map_context) | |
1667 | { | |
028867ac | 1668 | struct dm_dev *dev = ti->private; |
1da177e4 LT |
1669 | bio->bi_bdev = dev->bdev; |
1670 | ||
494b3ee7 MP |
1671 | if (unlikely(bio_empty_barrier(bio))) |
1672 | return DM_MAPIO_REMAPPED; | |
1673 | ||
1da177e4 | 1674 | /* Only tell snapshots if this is a write */ |
d2a7ad29 | 1675 | return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; |
1da177e4 LT |
1676 | } |
1677 | ||
1da177e4 LT |
1678 | /* |
1679 | * Set the target "split_io" field to the minimum of all the snapshots' | |
1680 | * chunk sizes. | |
1681 | */ | |
1682 | static void origin_resume(struct dm_target *ti) | |
1683 | { | |
028867ac | 1684 | struct dm_dev *dev = ti->private; |
1da177e4 LT |
1685 | |
1686 | down_read(&_origins_lock); | |
1da177e4 | 1687 | |
7e201b35 MP |
1688 | ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev)); |
1689 | ||
1690 | up_read(&_origins_lock); | |
1da177e4 LT |
1691 | } |
1692 | ||
1693 | static int origin_status(struct dm_target *ti, status_type_t type, char *result, | |
1694 | unsigned int maxlen) | |
1695 | { | |
028867ac | 1696 | struct dm_dev *dev = ti->private; |
1da177e4 LT |
1697 | |
1698 | switch (type) { | |
1699 | case STATUSTYPE_INFO: | |
1700 | result[0] = '\0'; | |
1701 | break; | |
1702 | ||
1703 | case STATUSTYPE_TABLE: | |
1704 | snprintf(result, maxlen, "%s", dev->name); | |
1705 | break; | |
1706 | } | |
1707 | ||
1708 | return 0; | |
1709 | } | |
1710 | ||
8811f46c MS |
1711 | static int origin_iterate_devices(struct dm_target *ti, |
1712 | iterate_devices_callout_fn fn, void *data) | |
1713 | { | |
1714 | struct dm_dev *dev = ti->private; | |
1715 | ||
1716 | return fn(ti, dev, 0, ti->len, data); | |
1717 | } | |
1718 | ||
1da177e4 LT |
1719 | static struct target_type origin_target = { |
1720 | .name = "snapshot-origin", | |
8811f46c | 1721 | .version = {1, 7, 0}, |
1da177e4 LT |
1722 | .module = THIS_MODULE, |
1723 | .ctr = origin_ctr, | |
1724 | .dtr = origin_dtr, | |
1725 | .map = origin_map, | |
1726 | .resume = origin_resume, | |
1727 | .status = origin_status, | |
8811f46c | 1728 | .iterate_devices = origin_iterate_devices, |
1da177e4 LT |
1729 | }; |
1730 | ||
1731 | static struct target_type snapshot_target = { | |
1732 | .name = "snapshot", | |
c26655ca | 1733 | .version = {1, 9, 0}, |
1da177e4 LT |
1734 | .module = THIS_MODULE, |
1735 | .ctr = snapshot_ctr, | |
1736 | .dtr = snapshot_dtr, | |
1737 | .map = snapshot_map, | |
cd45daff | 1738 | .end_io = snapshot_end_io, |
c26655ca | 1739 | .postsuspend = snapshot_postsuspend, |
c1f0c183 | 1740 | .preresume = snapshot_preresume, |
1da177e4 LT |
1741 | .resume = snapshot_resume, |
1742 | .status = snapshot_status, | |
8811f46c | 1743 | .iterate_devices = snapshot_iterate_devices, |
1da177e4 LT |
1744 | }; |
1745 | ||
1746 | static int __init dm_snapshot_init(void) | |
1747 | { | |
1748 | int r; | |
1749 | ||
4db6bfe0 AK |
1750 | r = dm_exception_store_init(); |
1751 | if (r) { | |
1752 | DMERR("Failed to initialize exception stores"); | |
1753 | return r; | |
1754 | } | |
1755 | ||
1da177e4 LT |
1756 | r = dm_register_target(&snapshot_target); |
1757 | if (r) { | |
1758 | DMERR("snapshot target register failed %d", r); | |
034a186d | 1759 | goto bad_register_snapshot_target; |
1da177e4 LT |
1760 | } |
1761 | ||
1762 | r = dm_register_target(&origin_target); | |
1763 | if (r < 0) { | |
72d94861 | 1764 | DMERR("Origin target register failed %d", r); |
1da177e4 LT |
1765 | goto bad1; |
1766 | } | |
1767 | ||
1768 | r = init_origin_hash(); | |
1769 | if (r) { | |
1770 | DMERR("init_origin_hash failed."); | |
1771 | goto bad2; | |
1772 | } | |
1773 | ||
1d4989c8 | 1774 | exception_cache = KMEM_CACHE(dm_exception, 0); |
1da177e4 LT |
1775 | if (!exception_cache) { |
1776 | DMERR("Couldn't create exception cache."); | |
1777 | r = -ENOMEM; | |
1778 | goto bad3; | |
1779 | } | |
1780 | ||
028867ac | 1781 | pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); |
1da177e4 LT |
1782 | if (!pending_cache) { |
1783 | DMERR("Couldn't create pending cache."); | |
1784 | r = -ENOMEM; | |
1785 | goto bad4; | |
1786 | } | |
1787 | ||
cd45daff MP |
1788 | tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); |
1789 | if (!tracked_chunk_cache) { | |
1790 | DMERR("Couldn't create cache to track chunks in use."); | |
1791 | r = -ENOMEM; | |
1792 | goto bad5; | |
1793 | } | |
1794 | ||
ca3a931f AK |
1795 | ksnapd = create_singlethread_workqueue("ksnapd"); |
1796 | if (!ksnapd) { | |
1797 | DMERR("Failed to create ksnapd workqueue."); | |
1798 | r = -ENOMEM; | |
92e86812 | 1799 | goto bad_pending_pool; |
ca3a931f AK |
1800 | } |
1801 | ||
1da177e4 LT |
1802 | return 0; |
1803 | ||
4db6bfe0 | 1804 | bad_pending_pool: |
cd45daff | 1805 | kmem_cache_destroy(tracked_chunk_cache); |
4db6bfe0 | 1806 | bad5: |
1da177e4 | 1807 | kmem_cache_destroy(pending_cache); |
4db6bfe0 | 1808 | bad4: |
1da177e4 | 1809 | kmem_cache_destroy(exception_cache); |
4db6bfe0 | 1810 | bad3: |
1da177e4 | 1811 | exit_origin_hash(); |
4db6bfe0 | 1812 | bad2: |
1da177e4 | 1813 | dm_unregister_target(&origin_target); |
4db6bfe0 | 1814 | bad1: |
1da177e4 | 1815 | dm_unregister_target(&snapshot_target); |
034a186d JB |
1816 | |
1817 | bad_register_snapshot_target: | |
1818 | dm_exception_store_exit(); | |
1da177e4 LT |
1819 | return r; |
1820 | } | |
1821 | ||
1822 | static void __exit dm_snapshot_exit(void) | |
1823 | { | |
ca3a931f AK |
1824 | destroy_workqueue(ksnapd); |
1825 | ||
10d3bd09 MP |
1826 | dm_unregister_target(&snapshot_target); |
1827 | dm_unregister_target(&origin_target); | |
1da177e4 LT |
1828 | |
1829 | exit_origin_hash(); | |
1da177e4 LT |
1830 | kmem_cache_destroy(pending_cache); |
1831 | kmem_cache_destroy(exception_cache); | |
cd45daff | 1832 | kmem_cache_destroy(tracked_chunk_cache); |
4db6bfe0 AK |
1833 | |
1834 | dm_exception_store_exit(); | |
1da177e4 LT |
1835 | } |
1836 | ||
1837 | /* Module hooks */ | |
1838 | module_init(dm_snapshot_init); | |
1839 | module_exit(dm_snapshot_exit); | |
1840 | ||
1841 | MODULE_DESCRIPTION(DM_NAME " snapshot target"); | |
1842 | MODULE_AUTHOR("Joe Thornber"); | |
1843 | MODULE_LICENSE("GPL"); |