Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * dm-snapshot.c | |
3 | * | |
4 | * Copyright (C) 2001-2002 Sistina Software (UK) Limited. | |
5 | * | |
6 | * This file is released under the GPL. | |
7 | */ | |
8 | ||
9 | #include <linux/blkdev.h> | |
1da177e4 | 10 | #include <linux/device-mapper.h> |
90fa1527 | 11 | #include <linux/delay.h> |
1da177e4 LT |
12 | #include <linux/fs.h> |
13 | #include <linux/init.h> | |
14 | #include <linux/kdev_t.h> | |
15 | #include <linux/list.h> | |
16 | #include <linux/mempool.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/vmalloc.h> | |
6f3c3f0a | 20 | #include <linux/log2.h> |
a765e20e | 21 | #include <linux/dm-kcopyd.h> |
1da177e4 | 22 | |
b735fede MP |
23 | #include "dm.h" |
24 | ||
aea53d92 | 25 | #include "dm-exception-store.h" |
1da177e4 | 26 | |
72d94861 AK |
27 | #define DM_MSG_PREFIX "snapshots" |
28 | ||
d698aa45 MP |
29 | static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; |
30 | ||
31 | #define dm_target_is_snapshot_merge(ti) \ | |
32 | ((ti)->type->name == dm_snapshot_merge_target_name) | |
33 | ||
cd45daff MP |
34 | /* |
35 | * The size of the mempool used to track chunks in use. | |
36 | */ | |
37 | #define MIN_IOS 256 | |
38 | ||
ccc45ea8 JB |
39 | #define DM_TRACKED_CHUNK_HASH_SIZE 16 |
40 | #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ | |
41 | (DM_TRACKED_CHUNK_HASH_SIZE - 1)) | |
42 | ||
191437a5 | 43 | struct dm_exception_table { |
ccc45ea8 JB |
44 | uint32_t hash_mask; |
45 | unsigned hash_shift; | |
46 | struct list_head *table; | |
47 | }; | |
48 | ||
49 | struct dm_snapshot { | |
50 | struct rw_semaphore lock; | |
51 | ||
52 | struct dm_dev *origin; | |
fc56f6fb MS |
53 | struct dm_dev *cow; |
54 | ||
55 | struct dm_target *ti; | |
ccc45ea8 JB |
56 | |
57 | /* List of snapshots per Origin */ | |
58 | struct list_head list; | |
59 | ||
d8ddb1cf MS |
60 | /* |
61 | * You can't use a snapshot if this is 0 (e.g. if full). | |
62 | * A snapshot-merge target never clears this. | |
63 | */ | |
ccc45ea8 JB |
64 | int valid; |
65 | ||
76c44f6d MP |
66 | /* |
67 | * The snapshot overflowed because of a write to the snapshot device. | |
68 | * We don't have to invalidate the snapshot in this case, but we need | |
69 | * to prevent further writes. | |
70 | */ | |
71 | int snapshot_overflowed; | |
72 | ||
ccc45ea8 JB |
73 | /* Origin writes don't trigger exceptions until this is set */ |
74 | int active; | |
75 | ||
ccc45ea8 JB |
76 | atomic_t pending_exceptions_count; |
77 | ||
230c83af MP |
78 | /* Protected by "lock" */ |
79 | sector_t exception_start_sequence; | |
80 | ||
81 | /* Protected by kcopyd single-threaded callback */ | |
82 | sector_t exception_complete_sequence; | |
83 | ||
84 | /* | |
85 | * A list of pending exceptions that completed out of order. | |
86 | * Protected by kcopyd single-threaded callback. | |
87 | */ | |
88 | struct list_head out_of_order_list; | |
89 | ||
924e600d MS |
90 | mempool_t *pending_pool; |
91 | ||
191437a5 JB |
92 | struct dm_exception_table pending; |
93 | struct dm_exception_table complete; | |
ccc45ea8 JB |
94 | |
95 | /* | |
96 | * pe_lock protects all pending_exception operations and access | |
97 | * as well as the snapshot_bios list. | |
98 | */ | |
99 | spinlock_t pe_lock; | |
100 | ||
924e600d MS |
101 | /* Chunks with outstanding reads */ |
102 | spinlock_t tracked_chunk_lock; | |
924e600d MS |
103 | struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; |
104 | ||
ccc45ea8 JB |
105 | /* The on disk metadata handler */ |
106 | struct dm_exception_store *store; | |
107 | ||
108 | struct dm_kcopyd_client *kcopyd_client; | |
109 | ||
924e600d MS |
110 | /* Wait for events based on state_bits */ |
111 | unsigned long state_bits; | |
112 | ||
113 | /* Range of chunks currently being merged. */ | |
114 | chunk_t first_merging_chunk; | |
115 | int num_merging_chunks; | |
1e03f97e | 116 | |
d8ddb1cf MS |
117 | /* |
118 | * The merge operation failed if this flag is set. | |
119 | * Failure modes are handled as follows: | |
120 | * - I/O error reading the header | |
121 | * => don't load the target; abort. | |
122 | * - Header does not have "valid" flag set | |
123 | * => use the origin; forget about the snapshot. | |
124 | * - I/O error when reading exceptions | |
125 | * => don't load the target; abort. | |
126 | * (We can't use the intermediate origin state.) | |
127 | * - I/O error while merging | |
128 | * => stop merging; set merge_failed; process I/O normally. | |
129 | */ | |
130 | int merge_failed; | |
131 | ||
9fe86254 MP |
132 | /* |
133 | * Incoming bios that overlap with chunks being merged must wait | |
134 | * for them to be committed. | |
135 | */ | |
136 | struct bio_list bios_queued_during_merge; | |
ccc45ea8 JB |
137 | }; |
138 | ||
1e03f97e MP |
139 | /* |
140 | * state_bits: | |
141 | * RUNNING_MERGE - Merge operation is in progress. | |
142 | * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped; | |
143 | * cleared afterwards. | |
144 | */ | |
145 | #define RUNNING_MERGE 0 | |
146 | #define SHUTDOWN_MERGE 1 | |
147 | ||
df5d2e90 MP |
148 | DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, |
149 | "A percentage of time allocated for copy on write"); | |
150 | ||
c2411045 MP |
151 | struct dm_dev *dm_snap_origin(struct dm_snapshot *s) |
152 | { | |
153 | return s->origin; | |
154 | } | |
155 | EXPORT_SYMBOL(dm_snap_origin); | |
156 | ||
fc56f6fb MS |
157 | struct dm_dev *dm_snap_cow(struct dm_snapshot *s) |
158 | { | |
159 | return s->cow; | |
160 | } | |
161 | EXPORT_SYMBOL(dm_snap_cow); | |
162 | ||
ccc45ea8 JB |
163 | static sector_t chunk_to_sector(struct dm_exception_store *store, |
164 | chunk_t chunk) | |
165 | { | |
166 | return chunk << store->chunk_shift; | |
167 | } | |
168 | ||
169 | static int bdev_equal(struct block_device *lhs, struct block_device *rhs) | |
170 | { | |
171 | /* | |
172 | * There is only ever one instance of a particular block | |
173 | * device so we can compare pointers safely. | |
174 | */ | |
175 | return lhs == rhs; | |
176 | } | |
177 | ||
028867ac | 178 | struct dm_snap_pending_exception { |
1d4989c8 | 179 | struct dm_exception e; |
1da177e4 LT |
180 | |
181 | /* | |
182 | * Origin buffers waiting for this to complete are held | |
183 | * in a bio list | |
184 | */ | |
185 | struct bio_list origin_bios; | |
186 | struct bio_list snapshot_bios; | |
187 | ||
1da177e4 LT |
188 | /* Pointer back to snapshot context */ |
189 | struct dm_snapshot *snap; | |
190 | ||
191 | /* | |
192 | * 1 indicates the exception has already been sent to | |
193 | * kcopyd. | |
194 | */ | |
195 | int started; | |
a6e50b40 | 196 | |
230c83af MP |
197 | /* There was a copying error. */ |
198 | int copy_error; | |
199 | ||
200 | /* A sequence number, used for in-order completion. */ |
201 | sector_t exception_sequence; | |
202 | ||
203 | struct list_head out_of_order_entry; | |
204 | ||
a6e50b40 MP |
205 | /* |
206 | * For writing a complete chunk, bypassing the copy. | |
207 | */ | |
208 | struct bio *full_bio; | |
209 | bio_end_io_t *full_bio_end_io; | |
210 | void *full_bio_private; | |
1da177e4 LT |
211 | }; |
212 | ||
213 | /* | |
214 | * Hash table mapping origin volumes to lists of snapshots and | |
215 | * a lock to protect it | |
216 | */ | |
e18b890b CL |
217 | static struct kmem_cache *exception_cache; |
218 | static struct kmem_cache *pending_cache; | |
1da177e4 | 219 | |
cd45daff MP |
220 | struct dm_snap_tracked_chunk { |
221 | struct hlist_node node; | |
222 | chunk_t chunk; | |
223 | }; | |
224 | ||
ee18026a MP |
225 | static void init_tracked_chunk(struct bio *bio) |
226 | { | |
227 | struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); | |
228 | INIT_HLIST_NODE(&c->node); | |
229 | } | |
230 | ||
231 | static bool is_bio_tracked(struct bio *bio) | |
232 | { | |
233 | struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); | |
234 | return !hlist_unhashed(&c->node); | |
235 | } | |
236 | ||
237 | static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) | |
cd45daff | 238 | { |
42bc954f | 239 | struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); |
cd45daff MP |
240 | |
241 | c->chunk = chunk; | |
242 | ||
9aa0c0e6 | 243 | spin_lock_irq(&s->tracked_chunk_lock); |
cd45daff MP |
244 | hlist_add_head(&c->node, |
245 | &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); | |
9aa0c0e6 | 246 | spin_unlock_irq(&s->tracked_chunk_lock); |
cd45daff MP |
247 | } |
248 | ||
ee18026a | 249 | static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) |
cd45daff | 250 | { |
ee18026a | 251 | struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk)); |
cd45daff MP |
252 | unsigned long flags; |
253 | ||
254 | spin_lock_irqsave(&s->tracked_chunk_lock, flags); | |
255 | hlist_del(&c->node); | |
256 | spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); | |
cd45daff MP |
257 | } |
258 | ||
a8d41b59 MP |
259 | static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) |
260 | { | |
261 | struct dm_snap_tracked_chunk *c; | |
a8d41b59 MP |
262 | int found = 0; |
263 | ||
264 | spin_lock_irq(&s->tracked_chunk_lock); | |
265 | ||
b67bfe0d | 266 | hlist_for_each_entry(c, |
a8d41b59 MP |
267 | &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { |
268 | if (c->chunk == chunk) { | |
269 | found = 1; | |
270 | break; | |
271 | } | |
272 | } | |
273 | ||
274 | spin_unlock_irq(&s->tracked_chunk_lock); | |
275 | ||
276 | return found; | |
277 | } | |
278 | ||
615d1eb9 MS |
279 | /* |
280 | * This conflicting I/O is extremely improbable in the caller, | |
281 | * so msleep(1) is sufficient and there is no need for a wait queue. | |
282 | */ | |
283 | static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) | |
284 | { | |
285 | while (__chunk_is_tracked(s, chunk)) | |
286 | msleep(1); | |
287 | } | |
288 | ||
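The tracked-chunk code above exists so that a chunk is not remapped to the COW device while a read issued against its old location is still in flight: each read registers its chunk in a 16-bucket hash keyed by the low bits of the chunk number, and the copy-completion path spins in __check_for_conflicting_io() until no entry for that chunk remains. Below is a minimal userspace sketch of the same idea, not kernel code: a plain mutex and malloc'd nodes stand in for the irq-safe spinlock and the per-bio data, and all names are illustrative.

```c
/* Userspace sketch of the tracked-chunk hash; simplified stand-in types. */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <unistd.h>

#define TRACKED_HASH_SIZE 16
#define TRACKED_HASH(c)   ((unsigned long)(c) & (TRACKED_HASH_SIZE - 1))

struct tracked {
	unsigned long chunk;
	struct tracked *next;
};

static struct tracked *tracked_buckets[TRACKED_HASH_SIZE];
static pthread_mutex_t tracked_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called when a read is remapped: remember which chunk it touches. */
static struct tracked *track_chunk(unsigned long chunk)
{
	struct tracked *t = malloc(sizeof(*t));

	t->chunk = chunk;
	pthread_mutex_lock(&tracked_lock);
	t->next = tracked_buckets[TRACKED_HASH(chunk)];
	tracked_buckets[TRACKED_HASH(chunk)] = t;
	pthread_mutex_unlock(&tracked_lock);
	return t;	/* the read's completion path unlinks and frees it */
}

static bool chunk_is_tracked(unsigned long chunk)
{
	struct tracked *t;
	bool found = false;

	pthread_mutex_lock(&tracked_lock);
	for (t = tracked_buckets[TRACKED_HASH(chunk)]; t; t = t->next)
		if (t->chunk == chunk) {
			found = true;
			break;
		}
	pthread_mutex_unlock(&tracked_lock);
	return found;
}

/* Analogue of __check_for_conflicting_io(): wait out any in-flight reads. */
static void wait_for_conflicting_reads(unsigned long chunk)
{
	while (chunk_is_tracked(chunk))
		usleep(1000);
}
```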
1da177e4 LT |
289 | /* |
290 | * One of these per registered origin, held in the snapshot_origins hash | |
291 | */ | |
292 | struct origin { | |
293 | /* The origin device */ | |
294 | struct block_device *bdev; | |
295 | ||
296 | struct list_head hash_list; | |
297 | ||
298 | /* List of snapshots for this origin */ | |
299 | struct list_head snapshots; | |
300 | }; | |
301 | ||
b735fede MP |
302 | /* |
303 | * This structure is allocated for each origin target | |
304 | */ | |
305 | struct dm_origin { | |
306 | struct dm_dev *dev; | |
307 | struct dm_target *ti; | |
308 | unsigned split_boundary; | |
309 | struct list_head hash_list; | |
310 | }; | |
311 | ||
1da177e4 LT |
312 | /* |
313 | * Size of the hash table for origin volumes. If we make this | |
314 | * the size of the minors list then it should be nearly perfect | |
315 | */ | |
316 | #define ORIGIN_HASH_SIZE 256 | |
317 | #define ORIGIN_MASK 0xFF | |
318 | static struct list_head *_origins; | |
b735fede | 319 | static struct list_head *_dm_origins; |
1da177e4 LT |
320 | static struct rw_semaphore _origins_lock; |
321 | ||
73dfd078 MP |
322 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); |
323 | static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock); | |
324 | static uint64_t _pending_exceptions_done_count; | |
325 | ||
1da177e4 LT |
326 | static int init_origin_hash(void) |
327 | { | |
328 | int i; | |
329 | ||
330 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | |
331 | GFP_KERNEL); | |
332 | if (!_origins) { | |
b735fede | 333 | DMERR("unable to allocate memory for _origins"); |
1da177e4 LT |
334 | return -ENOMEM; |
335 | } | |
1da177e4 LT |
336 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) |
337 | INIT_LIST_HEAD(_origins + i); | |
b735fede MP |
338 | |
339 | _dm_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | |
340 | GFP_KERNEL); | |
341 | if (!_dm_origins) { | |
342 | DMERR("unable to allocate memory for _dm_origins"); | |
343 | kfree(_origins); | |
344 | return -ENOMEM; | |
345 | } | |
346 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | |
347 | INIT_LIST_HEAD(_dm_origins + i); | |
348 | ||
1da177e4 LT |
349 | init_rwsem(&_origins_lock); |
350 | ||
351 | return 0; | |
352 | } | |
353 | ||
354 | static void exit_origin_hash(void) | |
355 | { | |
356 | kfree(_origins); | |
b735fede | 357 | kfree(_dm_origins); |
1da177e4 LT |
358 | } |
359 | ||
028867ac | 360 | static unsigned origin_hash(struct block_device *bdev) |
1da177e4 LT |
361 | { |
362 | return bdev->bd_dev & ORIGIN_MASK; | |
363 | } | |
364 | ||
365 | static struct origin *__lookup_origin(struct block_device *origin) | |
366 | { | |
367 | struct list_head *ol; | |
368 | struct origin *o; | |
369 | ||
370 | ol = &_origins[origin_hash(origin)]; | |
371 | list_for_each_entry (o, ol, hash_list) | |
372 | if (bdev_equal(o->bdev, origin)) | |
373 | return o; | |
374 | ||
375 | return NULL; | |
376 | } | |
377 | ||
378 | static void __insert_origin(struct origin *o) | |
379 | { | |
380 | struct list_head *sl = &_origins[origin_hash(o->bdev)]; | |
381 | list_add_tail(&o->hash_list, sl); | |
382 | } | |
383 | ||
b735fede MP |
384 | static struct dm_origin *__lookup_dm_origin(struct block_device *origin) |
385 | { | |
386 | struct list_head *ol; | |
387 | struct dm_origin *o; | |
388 | ||
389 | ol = &_dm_origins[origin_hash(origin)]; | |
390 | list_for_each_entry (o, ol, hash_list) | |
391 | if (bdev_equal(o->dev->bdev, origin)) | |
392 | return o; | |
393 | ||
394 | return NULL; | |
395 | } | |
396 | ||
397 | static void __insert_dm_origin(struct dm_origin *o) | |
398 | { | |
399 | struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)]; | |
400 | list_add_tail(&o->hash_list, sl); | |
401 | } | |
402 | ||
403 | static void __remove_dm_origin(struct dm_origin *o) | |
404 | { | |
405 | list_del(&o->hash_list); | |
406 | } | |
407 | ||
c1f0c183 MS |
408 | /* |
409 | * _origins_lock must be held when calling this function. | |
410 | * Returns number of snapshots registered using the supplied cow device, plus: | |
411 | * snap_src - a snapshot suitable for use as a source of exception handover | |
412 | * snap_dest - a snapshot capable of receiving exception handover. | |
9d3b15c4 MP |
413 | * snap_merge - an existing snapshot-merge target linked to the same origin. |
414 | * There can be at most one snapshot-merge target. The parameter is optional. | |
c1f0c183 | 415 | * |
9d3b15c4 | 416 | * Possible return values and states of snap_src and snap_dest. |
c1f0c183 MS |
417 | * 0: NULL, NULL - first new snapshot |
418 | * 1: snap_src, NULL - normal snapshot | |
419 | * 2: snap_src, snap_dest - waiting for handover | |
420 | * 2: snap_src, NULL - handed over, waiting for old to be deleted | |
421 | * 1: NULL, snap_dest - source got destroyed without handover | |
422 | */ | |
423 | static int __find_snapshots_sharing_cow(struct dm_snapshot *snap, | |
424 | struct dm_snapshot **snap_src, | |
9d3b15c4 MP |
425 | struct dm_snapshot **snap_dest, |
426 | struct dm_snapshot **snap_merge) | |
c1f0c183 MS |
427 | { |
428 | struct dm_snapshot *s; | |
429 | struct origin *o; | |
430 | int count = 0; | |
431 | int active; | |
432 | ||
433 | o = __lookup_origin(snap->origin->bdev); | |
434 | if (!o) | |
435 | goto out; | |
436 | ||
437 | list_for_each_entry(s, &o->snapshots, list) { | |
9d3b15c4 MP |
438 | if (dm_target_is_snapshot_merge(s->ti) && snap_merge) |
439 | *snap_merge = s; | |
c1f0c183 MS |
440 | if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) |
441 | continue; | |
442 | ||
443 | down_read(&s->lock); | |
444 | active = s->active; | |
445 | up_read(&s->lock); | |
446 | ||
447 | if (active) { | |
448 | if (snap_src) | |
449 | *snap_src = s; | |
450 | } else if (snap_dest) | |
451 | *snap_dest = s; | |
452 | ||
453 | count++; | |
454 | } | |
455 | ||
456 | out: | |
457 | return count; | |
458 | } | |
459 | ||
460 | /* | |
461 | * On success, returns 1 if this snapshot is a handover destination, | |
462 | * otherwise returns 0. | |
463 | */ | |
464 | static int __validate_exception_handover(struct dm_snapshot *snap) | |
465 | { | |
466 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | |
9d3b15c4 | 467 | struct dm_snapshot *snap_merge = NULL; |
c1f0c183 MS |
468 | |
469 | /* Does snapshot need exceptions handed over to it? */ | |
9d3b15c4 MP |
470 | if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, |
471 | &snap_merge) == 2) || | |
c1f0c183 MS |
472 | snap_dest) { |
473 | snap->ti->error = "Snapshot cow pairing for exception " | |
474 | "table handover failed"; | |
475 | return -EINVAL; | |
476 | } | |
477 | ||
478 | /* | |
479 | * If no snap_src was found, snap cannot become a handover | |
480 | * destination. | |
481 | */ | |
482 | if (!snap_src) | |
483 | return 0; | |
484 | ||
9d3b15c4 MP |
485 | /* |
486 | * Non-snapshot-merge handover? | |
487 | */ | |
488 | if (!dm_target_is_snapshot_merge(snap->ti)) | |
489 | return 1; | |
490 | ||
491 | /* | |
492 | * Do not allow more than one merging snapshot. | |
493 | */ | |
494 | if (snap_merge) { | |
495 | snap->ti->error = "A snapshot is already merging."; | |
496 | return -EINVAL; | |
497 | } | |
498 | ||
1e03f97e MP |
499 | if (!snap_src->store->type->prepare_merge || |
500 | !snap_src->store->type->commit_merge) { | |
501 | snap->ti->error = "Snapshot exception store does not " | |
502 | "support snapshot-merge."; | |
503 | return -EINVAL; | |
504 | } | |
505 | ||
c1f0c183 MS |
506 | return 1; |
507 | } | |
508 | ||
509 | static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) | |
510 | { | |
511 | struct dm_snapshot *l; | |
512 | ||
513 | /* Sort the list according to chunk size, largest-first smallest-last */ | |
514 | list_for_each_entry(l, &o->snapshots, list) | |
515 | if (l->store->chunk_size < s->store->chunk_size) | |
516 | break; | |
517 | list_add_tail(&s->list, &l->list); | |
518 | } | |
519 | ||
1da177e4 LT |
520 | /* |
521 | * Make a note of the snapshot and its origin so we can look it | |
522 | * up when the origin has a write on it. | |
c1f0c183 MS |
523 | * |
524 | * Also validate snapshot exception store handovers. | |
525 | * On success, returns 1 if this registration is a handover destination, | |
526 | * otherwise returns 0. | |
1da177e4 LT |
527 | */ |
528 | static int register_snapshot(struct dm_snapshot *snap) | |
529 | { | |
c1f0c183 | 530 | struct origin *o, *new_o = NULL; |
1da177e4 | 531 | struct block_device *bdev = snap->origin->bdev; |
c1f0c183 | 532 | int r = 0; |
1da177e4 | 533 | |
60c856c8 MP |
534 | new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); |
535 | if (!new_o) | |
536 | return -ENOMEM; | |
537 | ||
1da177e4 | 538 | down_write(&_origins_lock); |
1da177e4 | 539 | |
c1f0c183 MS |
540 | r = __validate_exception_handover(snap); |
541 | if (r < 0) { | |
542 | kfree(new_o); | |
543 | goto out; | |
544 | } | |
545 | ||
546 | o = __lookup_origin(bdev); | |
60c856c8 MP |
547 | if (o) |
548 | kfree(new_o); | |
549 | else { | |
1da177e4 | 550 | /* New origin */ |
60c856c8 | 551 | o = new_o; |
1da177e4 LT |
552 | |
553 | /* Initialise the struct */ | |
554 | INIT_LIST_HEAD(&o->snapshots); | |
555 | o->bdev = bdev; | |
556 | ||
557 | __insert_origin(o); | |
558 | } | |
559 | ||
c1f0c183 MS |
560 | __insert_snapshot(o, snap); |
561 | ||
562 | out: | |
563 | up_write(&_origins_lock); | |
564 | ||
565 | return r; | |
566 | } | |
567 | ||
568 | /* | |
569 | * Move snapshot to correct place in list according to chunk size. | |
570 | */ | |
571 | static void reregister_snapshot(struct dm_snapshot *s) | |
572 | { | |
573 | struct block_device *bdev = s->origin->bdev; | |
574 | ||
575 | down_write(&_origins_lock); | |
576 | ||
577 | list_del(&s->list); | |
578 | __insert_snapshot(__lookup_origin(bdev), s); | |
1da177e4 LT |
579 | |
580 | up_write(&_origins_lock); | |
1da177e4 LT |
581 | } |
582 | ||
583 | static void unregister_snapshot(struct dm_snapshot *s) | |
584 | { | |
585 | struct origin *o; | |
586 | ||
587 | down_write(&_origins_lock); | |
588 | o = __lookup_origin(s->origin->bdev); | |
589 | ||
590 | list_del(&s->list); | |
c1f0c183 | 591 | if (o && list_empty(&o->snapshots)) { |
1da177e4 LT |
592 | list_del(&o->hash_list); |
593 | kfree(o); | |
594 | } | |
595 | ||
596 | up_write(&_origins_lock); | |
597 | } | |
598 | ||
599 | /* | |
600 | * Implementation of the exception hash tables. | |
d74f81f8 MB |
601 | * The lowest hash_shift bits of the chunk number are ignored, allowing |
602 | * some consecutive chunks to be grouped together. | |
1da177e4 | 603 | */ |
3510cb94 JB |
604 | static int dm_exception_table_init(struct dm_exception_table *et, |
605 | uint32_t size, unsigned hash_shift) | |
1da177e4 LT |
606 | { |
607 | unsigned int i; | |
608 | ||
d74f81f8 | 609 | et->hash_shift = hash_shift; |
1da177e4 LT |
610 | et->hash_mask = size - 1; |
611 | et->table = dm_vcalloc(size, sizeof(struct list_head)); | |
612 | if (!et->table) | |
613 | return -ENOMEM; | |
614 | ||
615 | for (i = 0; i < size; i++) | |
616 | INIT_LIST_HEAD(et->table + i); | |
617 | ||
618 | return 0; | |
619 | } | |
620 | ||
3510cb94 JB |
621 | static void dm_exception_table_exit(struct dm_exception_table *et, |
622 | struct kmem_cache *mem) | |
1da177e4 LT |
623 | { |
624 | struct list_head *slot; | |
1d4989c8 | 625 | struct dm_exception *ex, *next; |
1da177e4 LT |
626 | int i, size; |
627 | ||
628 | size = et->hash_mask + 1; | |
629 | for (i = 0; i < size; i++) { | |
630 | slot = et->table + i; | |
631 | ||
632 | list_for_each_entry_safe (ex, next, slot, hash_list) | |
633 | kmem_cache_free(mem, ex); | |
634 | } | |
635 | ||
636 | vfree(et->table); | |
637 | } | |
638 | ||
191437a5 | 639 | static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) |
1da177e4 | 640 | { |
d74f81f8 | 641 | return (chunk >> et->hash_shift) & et->hash_mask; |
1da177e4 LT |
642 | } |
643 | ||
3510cb94 | 644 | static void dm_remove_exception(struct dm_exception *e) |
1da177e4 LT |
645 | { |
646 | list_del(&e->hash_list); | |
647 | } | |
648 | ||
649 | /* | |
650 | * Return the exception data for a sector, or NULL if not | |
651 | * remapped. | |
652 | */ | |
3510cb94 JB |
653 | static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et, |
654 | chunk_t chunk) | |
1da177e4 LT |
655 | { |
656 | struct list_head *slot; | |
1d4989c8 | 657 | struct dm_exception *e; |
1da177e4 LT |
658 | |
659 | slot = &et->table[exception_hash(et, chunk)]; | |
660 | list_for_each_entry (e, slot, hash_list) | |
d74f81f8 MB |
661 | if (chunk >= e->old_chunk && |
662 | chunk <= e->old_chunk + dm_consecutive_chunk_count(e)) | |
1da177e4 LT |
663 | return e; |
664 | ||
665 | return NULL; | |
666 | } | |
667 | ||
119bc547 | 668 | static struct dm_exception *alloc_completed_exception(gfp_t gfp) |
1da177e4 | 669 | { |
1d4989c8 | 670 | struct dm_exception *e; |
1da177e4 | 671 | |
119bc547 MP |
672 | e = kmem_cache_alloc(exception_cache, gfp); |
673 | if (!e && gfp == GFP_NOIO) | |
1da177e4 LT |
674 | e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); |
675 | ||
676 | return e; | |
677 | } | |
678 | ||
3510cb94 | 679 | static void free_completed_exception(struct dm_exception *e) |
1da177e4 LT |
680 | { |
681 | kmem_cache_free(exception_cache, e); | |
682 | } | |
683 | ||
92e86812 | 684 | static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s) |
1da177e4 | 685 | { |
92e86812 MP |
686 | struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool, |
687 | GFP_NOIO); | |
688 | ||
879129d2 | 689 | atomic_inc(&s->pending_exceptions_count); |
92e86812 MP |
690 | pe->snap = s; |
691 | ||
692 | return pe; | |
1da177e4 LT |
693 | } |
694 | ||
028867ac | 695 | static void free_pending_exception(struct dm_snap_pending_exception *pe) |
1da177e4 | 696 | { |
879129d2 MP |
697 | struct dm_snapshot *s = pe->snap; |
698 | ||
699 | mempool_free(pe, s->pending_pool); | |
4e857c58 | 700 | smp_mb__before_atomic(); |
879129d2 | 701 | atomic_dec(&s->pending_exceptions_count); |
1da177e4 LT |
702 | } |
703 | ||
3510cb94 JB |
704 | static void dm_insert_exception(struct dm_exception_table *eh, |
705 | struct dm_exception *new_e) | |
d74f81f8 | 706 | { |
d74f81f8 | 707 | struct list_head *l; |
1d4989c8 | 708 | struct dm_exception *e = NULL; |
d74f81f8 MB |
709 | |
710 | l = &eh->table[exception_hash(eh, new_e->old_chunk)]; | |
711 | ||
712 | /* Add immediately if this table doesn't support consecutive chunks */ | |
713 | if (!eh->hash_shift) | |
714 | goto out; | |
715 | ||
716 | /* List is ordered by old_chunk */ | |
717 | list_for_each_entry_reverse(e, l, hash_list) { | |
718 | /* Insert after an existing chunk? */ | |
719 | if (new_e->old_chunk == (e->old_chunk + | |
720 | dm_consecutive_chunk_count(e) + 1) && | |
721 | new_e->new_chunk == (dm_chunk_number(e->new_chunk) + | |
722 | dm_consecutive_chunk_count(e) + 1)) { | |
723 | dm_consecutive_chunk_count_inc(e); | |
3510cb94 | 724 | free_completed_exception(new_e); |
d74f81f8 MB |
725 | return; |
726 | } | |
727 | ||
728 | /* Insert before an existing chunk? */ | |
729 | if (new_e->old_chunk == (e->old_chunk - 1) && | |
730 | new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) { | |
731 | dm_consecutive_chunk_count_inc(e); | |
732 | e->old_chunk--; | |
733 | e->new_chunk--; | |
3510cb94 | 734 | free_completed_exception(new_e); |
d74f81f8 MB |
735 | return; |
736 | } | |
737 | ||
738 | if (new_e->old_chunk > e->old_chunk) | |
739 | break; | |
740 | } | |
741 | ||
742 | out: | |
743 | list_add(&new_e->hash_list, e ? &e->hash_list : l); | |
744 | } | |
745 | ||
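dm_insert_exception() keeps the completed table compact: when the new mapping extends an existing exception by one chunk on both the origin side (old_chunk) and the COW side (new_chunk), or immediately precedes it on both sides, the entry's consecutive-chunk count is adjusted instead of adding a new element. The sketch below models just that coalescing rule with an explicit counter field; the kernel actually stores the count in spare bits of new_chunk (hence dm_chunk_number() and dm_consecutive_chunk_count()), which is simplified away here, and the names are made up.

```c
/* Simplified model of consecutive-chunk coalescing (not the kernel encoding):
 * an entry covers origin chunks [old_chunk, old_chunk + nr_consecutive]
 * mapping to COW chunks [new_chunk, new_chunk + nr_consecutive]. */
#include <stdbool.h>

struct exception {
	unsigned long long old_chunk;
	unsigned long long new_chunk;
	unsigned nr_consecutive;	/* extra chunks beyond the first */
};

/* Try to absorb (old, new) into an existing entry; return true on success. */
static bool try_coalesce(struct exception *e,
			 unsigned long long old, unsigned long long new)
{
	/* New chunk follows the run on both origin and COW sides. */
	if (old == e->old_chunk + e->nr_consecutive + 1 &&
	    new == e->new_chunk + e->nr_consecutive + 1) {
		e->nr_consecutive++;
		return true;
	}

	/* New chunk precedes the run on both sides. */
	if (old == e->old_chunk - 1 && new == e->new_chunk - 1) {
		e->old_chunk--;
		e->new_chunk--;
		e->nr_consecutive++;
		return true;
	}

	return false;	/* caller allocates a fresh entry */
}
```

Lookups then only have to test whether a chunk falls inside [old_chunk, old_chunk + count], which is exactly the range check dm_lookup_exception() performs above.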
a159c1ac JB |
746 | /* |
747 | * Callback used by the exception stores to load exceptions when | |
748 | * initialising. | |
749 | */ | |
750 | static int dm_add_exception(void *context, chunk_t old, chunk_t new) | |
1da177e4 | 751 | { |
a159c1ac | 752 | struct dm_snapshot *s = context; |
1d4989c8 | 753 | struct dm_exception *e; |
1da177e4 | 754 | |
119bc547 | 755 | e = alloc_completed_exception(GFP_KERNEL); |
1da177e4 LT |
756 | if (!e) |
757 | return -ENOMEM; | |
758 | ||
759 | e->old_chunk = old; | |
d74f81f8 MB |
760 | |
761 | /* Consecutive_count is implicitly initialised to zero */ | |
1da177e4 | 762 | e->new_chunk = new; |
d74f81f8 | 763 | |
3510cb94 | 764 | dm_insert_exception(&s->complete, e); |
d74f81f8 | 765 | |
1da177e4 LT |
766 | return 0; |
767 | } | |
768 | ||
7e201b35 MP |
769 | /* |
770 | * Return a minimum chunk size of all snapshots that have the specified origin. | |
771 | * Return zero if the origin has no snapshots. | |
772 | */ | |
542f9038 | 773 | static uint32_t __minimum_chunk_size(struct origin *o) |
7e201b35 MP |
774 | { |
775 | struct dm_snapshot *snap; | |
776 | unsigned chunk_size = 0; | |
777 | ||
778 | if (o) | |
779 | list_for_each_entry(snap, &o->snapshots, list) | |
780 | chunk_size = min_not_zero(chunk_size, | |
781 | snap->store->chunk_size); | |
782 | ||
542f9038 | 783 | return (uint32_t) chunk_size; |
7e201b35 MP |
784 | } |
785 | ||
1da177e4 LT |
786 | /* |
787 | * Hard coded magic. | |
788 | */ | |
789 | static int calc_max_buckets(void) | |
790 | { | |
791 | /* use a fixed size of 2MB */ | |
792 | unsigned long mem = 2 * 1024 * 1024; | |
793 | mem /= sizeof(struct list_head); | |
794 | ||
795 | return mem; | |
796 | } | |
797 | ||
1da177e4 LT |
798 | /* |
799 | * Allocate room for a suitable hash table. | |
800 | */ | |
fee1998e | 801 | static int init_hash_tables(struct dm_snapshot *s) |
1da177e4 | 802 | { |
60e356f3 | 803 | sector_t hash_size, cow_dev_size, max_buckets; |
1da177e4 LT |
804 | |
805 | /* | |
806 | * Calculate based on the size of the original volume or | |
807 | * the COW volume... | |
808 | */ | |
fc56f6fb | 809 | cow_dev_size = get_dev_size(s->cow->bdev); |
1da177e4 LT |
810 | max_buckets = calc_max_buckets(); |
811 | ||
60e356f3 | 812 | hash_size = cow_dev_size >> s->store->chunk_shift; |
1da177e4 LT |
813 | hash_size = min(hash_size, max_buckets); |
814 | ||
8e87b9b8 MP |
815 | if (hash_size < 64) |
816 | hash_size = 64; | |
8defd830 | 817 | hash_size = rounddown_pow_of_two(hash_size); |
3510cb94 JB |
818 | if (dm_exception_table_init(&s->complete, hash_size, |
819 | DM_CHUNK_CONSECUTIVE_BITS)) | |
1da177e4 LT |
820 | return -ENOMEM; |
821 | ||
822 | /* | |
823 | * Allocate hash table for in-flight exceptions | |
824 | * Make this smaller than the real hash table | |
825 | */ | |
826 | hash_size >>= 3; | |
827 | if (hash_size < 64) | |
828 | hash_size = 64; | |
829 | ||
3510cb94 JB |
830 | if (dm_exception_table_init(&s->pending, hash_size, 0)) { |
831 | dm_exception_table_exit(&s->complete, exception_cache); | |
1da177e4 LT |
832 | return -ENOMEM; |
833 | } | |
834 | ||
835 | return 0; | |
836 | } | |
837 | ||
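init_hash_tables() derives the completed-exception table size from the COW device: one bucket per chunk, capped by calc_max_buckets() at 2 MB worth of list heads, rounded down to a power of two and never below 64; the pending table is an eighth of that, with the same floor. A small worked example of that arithmetic, assuming 512-byte sectors and 16-byte list heads (a 64-bit build), so the cap works out to 131072 buckets; the numbers are illustrative only:

```c
/* Worked example of the hash-table sizing logic; assumed device sizes. */
#include <stdint.h>
#include <stdio.h>

static uint64_t rounddown_pow_of_two(uint64_t n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear the lowest set bit until one remains */
	return n;
}

int main(void)
{
	uint64_t cow_sectors = 16ULL << 30 >> 9;	/* 16 GiB COW device */
	unsigned chunk_shift = 4;			/* 8 KiB chunks = 16 sectors */
	uint64_t max_buckets = 2 * 1024 * 1024 / 16;	/* calc_max_buckets() */

	uint64_t hash_size = cow_sectors >> chunk_shift;
	if (hash_size > max_buckets)
		hash_size = max_buckets;
	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);

	uint64_t pending_size = hash_size >> 3;
	if (pending_size < 64)
		pending_size = 64;

	printf("complete table: %llu buckets, pending table: %llu buckets\n",
	       (unsigned long long)hash_size, (unsigned long long)pending_size);
	return 0;
}
```

The power-of-two rounding matters because hash_mask is set to size - 1, so exception_hash() can mask instead of divide.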
1e03f97e MP |
838 | static void merge_shutdown(struct dm_snapshot *s) |
839 | { | |
840 | clear_bit_unlock(RUNNING_MERGE, &s->state_bits); | |
4e857c58 | 841 | smp_mb__after_atomic(); |
1e03f97e MP |
842 | wake_up_bit(&s->state_bits, RUNNING_MERGE); |
843 | } | |
844 | ||
9fe86254 MP |
845 | static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) |
846 | { | |
847 | s->first_merging_chunk = 0; | |
848 | s->num_merging_chunks = 0; | |
849 | ||
850 | return bio_list_get(&s->bios_queued_during_merge); | |
851 | } | |
852 | ||
1e03f97e MP |
853 | /* |
854 | * Remove one chunk from the index of completed exceptions. | |
855 | */ | |
856 | static int __remove_single_exception_chunk(struct dm_snapshot *s, | |
857 | chunk_t old_chunk) | |
858 | { | |
859 | struct dm_exception *e; | |
860 | ||
1e03f97e MP |
861 | e = dm_lookup_exception(&s->complete, old_chunk); |
862 | if (!e) { | |
863 | DMERR("Corruption detected: exception for block %llu is " | |
864 | "on disk but not in memory", | |
865 | (unsigned long long)old_chunk); | |
866 | return -EINVAL; | |
867 | } | |
868 | ||
869 | /* | |
870 | * If this is the only chunk using this exception, remove exception. | |
871 | */ | |
872 | if (!dm_consecutive_chunk_count(e)) { | |
873 | dm_remove_exception(e); | |
874 | free_completed_exception(e); | |
875 | return 0; | |
876 | } | |
877 | ||
878 | /* | |
879 | * The chunk may be either at the beginning or the end of a | |
880 | * group of consecutive chunks - never in the middle. We are | |
881 | * removing chunks in the opposite order to that in which they | |
882 | * were added, so this should always be true. | |
883 | * Decrement the consecutive chunk counter and adjust the | |
884 | * starting point if necessary. | |
885 | */ | |
886 | if (old_chunk == e->old_chunk) { | |
887 | e->old_chunk++; | |
888 | e->new_chunk++; | |
889 | } else if (old_chunk != e->old_chunk + | |
890 | dm_consecutive_chunk_count(e)) { | |
891 | DMERR("Attempt to merge block %llu from the " | |
892 | "middle of a chunk range [%llu - %llu]", | |
893 | (unsigned long long)old_chunk, | |
894 | (unsigned long long)e->old_chunk, | |
895 | (unsigned long long) | |
896 | e->old_chunk + dm_consecutive_chunk_count(e)); | |
897 | return -EINVAL; | |
898 | } | |
899 | ||
900 | dm_consecutive_chunk_count_dec(e); | |
901 | ||
902 | return 0; | |
903 | } | |
904 | ||
9fe86254 MP |
905 | static void flush_bios(struct bio *bio); |
906 | ||
907 | static int remove_single_exception_chunk(struct dm_snapshot *s) | |
1e03f97e | 908 | { |
9fe86254 MP |
909 | struct bio *b = NULL; |
910 | int r; | |
911 | chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1; | |
1e03f97e MP |
912 | |
913 | down_write(&s->lock); | |
9fe86254 MP |
914 | |
915 | /* | |
916 | * Process chunks (and associated exceptions) in reverse order | |
917 | * so that dm_consecutive_chunk_count_dec() accounting works. | |
918 | */ | |
919 | do { | |
920 | r = __remove_single_exception_chunk(s, old_chunk); | |
921 | if (r) | |
922 | goto out; | |
923 | } while (old_chunk-- > s->first_merging_chunk); | |
924 | ||
925 | b = __release_queued_bios_after_merge(s); | |
926 | ||
927 | out: | |
1e03f97e | 928 | up_write(&s->lock); |
9fe86254 MP |
929 | if (b) |
930 | flush_bios(b); | |
1e03f97e MP |
931 | |
932 | return r; | |
933 | } | |
934 | ||
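__remove_single_exception_chunk() depends on the invariant spelled out in its comment: a merge only ever takes chunks off the ends of a consecutive run, never out of the middle, because remove_single_exception_chunk() walks the merged range from its highest chunk downwards. Reusing the simplified counter representation from the coalescing sketch earlier (illustrative names, not the kernel encoding), the adjustment looks like this:

```c
/* Shrink a run [old_chunk, old_chunk + nr_consecutive] by one chunk taken
 * from either end; a chunk from the middle indicates corruption. */
#include <stdbool.h>

struct exception {
	unsigned long long old_chunk;
	unsigned long long new_chunk;
	unsigned nr_consecutive;
};

static bool remove_chunk(struct exception *e, unsigned long long old)
{
	if (e->nr_consecutive == 0)
		return false;	/* single-chunk entry: caller frees it instead */

	if (old == e->old_chunk) {		/* peel from the front */
		e->old_chunk++;
		e->new_chunk++;
	} else if (old != e->old_chunk + e->nr_consecutive) {
		return false;			/* middle of the run: bug */
	}
	e->nr_consecutive--;			/* peel from the back (or after the front shift) */
	return true;
}
```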
73dfd078 MP |
935 | static int origin_write_extent(struct dm_snapshot *merging_snap, |
936 | sector_t sector, unsigned chunk_size); | |
937 | ||
1e03f97e MP |
938 | static void merge_callback(int read_err, unsigned long write_err, |
939 | void *context); | |
940 | ||
73dfd078 MP |
941 | static uint64_t read_pending_exceptions_done_count(void) |
942 | { | |
943 | uint64_t pending_exceptions_done; | |
944 | ||
945 | spin_lock(&_pending_exceptions_done_spinlock); | |
946 | pending_exceptions_done = _pending_exceptions_done_count; | |
947 | spin_unlock(&_pending_exceptions_done_spinlock); | |
948 | ||
949 | return pending_exceptions_done; | |
950 | } | |
951 | ||
952 | static void increment_pending_exceptions_done_count(void) | |
953 | { | |
954 | spin_lock(&_pending_exceptions_done_spinlock); | |
955 | _pending_exceptions_done_count++; | |
956 | spin_unlock(&_pending_exceptions_done_spinlock); | |
957 | ||
958 | wake_up_all(&_pending_exceptions_done); | |
959 | } | |
960 | ||
1e03f97e MP |
961 | static void snapshot_merge_next_chunks(struct dm_snapshot *s) |
962 | { | |
8a2d5286 | 963 | int i, linear_chunks; |
1e03f97e MP |
964 | chunk_t old_chunk, new_chunk; |
965 | struct dm_io_region src, dest; | |
8a2d5286 | 966 | sector_t io_size; |
73dfd078 | 967 | uint64_t previous_count; |
1e03f97e MP |
968 | |
969 | BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits)); | |
970 | if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits))) | |
971 | goto shut; | |
972 | ||
973 | /* | |
974 | * valid flag never changes during merge, so no lock required. | |
975 | */ | |
976 | if (!s->valid) { | |
977 | DMERR("Snapshot is invalid: can't merge"); | |
978 | goto shut; | |
979 | } | |
980 | ||
8a2d5286 MS |
981 | linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk, |
982 | &new_chunk); | |
983 | if (linear_chunks <= 0) { | |
d8ddb1cf | 984 | if (linear_chunks < 0) { |
1e03f97e MP |
985 | DMERR("Read error in exception store: " |
986 | "shutting down merge"); | |
d8ddb1cf MS |
987 | down_write(&s->lock); |
988 | s->merge_failed = 1; | |
989 | up_write(&s->lock); | |
990 | } | |
1e03f97e MP |
991 | goto shut; |
992 | } | |
993 | ||
8a2d5286 MS |
994 | /* Adjust old_chunk and new_chunk to reflect start of linear region */ |
995 | old_chunk = old_chunk + 1 - linear_chunks; | |
996 | new_chunk = new_chunk + 1 - linear_chunks; | |
997 | ||
998 | /* | |
999 | * Use one (potentially large) I/O to copy all 'linear_chunks' | |
1000 | * from the exception store to the origin | |
1001 | */ | |
1002 | io_size = linear_chunks * s->store->chunk_size; | |
1e03f97e | 1003 | |
1e03f97e MP |
1004 | dest.bdev = s->origin->bdev; |
1005 | dest.sector = chunk_to_sector(s->store, old_chunk); | |
8a2d5286 | 1006 | dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector); |
1e03f97e MP |
1007 | |
1008 | src.bdev = s->cow->bdev; | |
1009 | src.sector = chunk_to_sector(s->store, new_chunk); | |
1010 | src.count = dest.count; | |
1011 | ||
73dfd078 MP |
1012 | /* |
1013 | * Reallocate any exceptions needed in other snapshots then | |
1014 | * wait for the pending exceptions to complete. | |
1015 | * Each time any pending exception (globally on the system) | |
1016 | * completes we are woken and repeat the process to find out | |
1017 | * if we can proceed. While this may not seem a particularly | |
1018 | * efficient algorithm, it is not expected to have any | |
1019 | * significant impact on performance. | |
1020 | */ | |
1021 | previous_count = read_pending_exceptions_done_count(); | |
8a2d5286 | 1022 | while (origin_write_extent(s, dest.sector, io_size)) { |
73dfd078 MP |
1023 | wait_event(_pending_exceptions_done, |
1024 | (read_pending_exceptions_done_count() != | |
1025 | previous_count)); | |
1026 | /* Retry after the wait, until all exceptions are done. */ | |
1027 | previous_count = read_pending_exceptions_done_count(); | |
1028 | } | |
1029 | ||
9fe86254 MP |
1030 | down_write(&s->lock); |
1031 | s->first_merging_chunk = old_chunk; | |
8a2d5286 | 1032 | s->num_merging_chunks = linear_chunks; |
9fe86254 MP |
1033 | up_write(&s->lock); |
1034 | ||
8a2d5286 MS |
1035 | /* Wait until writes to all 'linear_chunks' drain */ |
1036 | for (i = 0; i < linear_chunks; i++) | |
1037 | __check_for_conflicting_io(s, old_chunk + i); | |
9fe86254 | 1038 | |
1e03f97e MP |
1039 | dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s); |
1040 | return; | |
1041 | ||
1042 | shut: | |
1043 | merge_shutdown(s); | |
1044 | } | |
1045 | ||
9fe86254 MP |
1046 | static void error_bios(struct bio *bio); |
1047 | ||
1e03f97e MP |
1048 | static void merge_callback(int read_err, unsigned long write_err, void *context) |
1049 | { | |
1050 | struct dm_snapshot *s = context; | |
9fe86254 | 1051 | struct bio *b = NULL; |
1e03f97e MP |
1052 | |
1053 | if (read_err || write_err) { | |
1054 | if (read_err) | |
1055 | DMERR("Read error: shutting down merge."); | |
1056 | else | |
1057 | DMERR("Write error: shutting down merge."); | |
1058 | goto shut; | |
1059 | } | |
1060 | ||
9fe86254 MP |
1061 | if (s->store->type->commit_merge(s->store, |
1062 | s->num_merging_chunks) < 0) { | |
1e03f97e MP |
1063 | DMERR("Write error in exception store: shutting down merge"); |
1064 | goto shut; | |
1065 | } | |
1066 | ||
9fe86254 MP |
1067 | if (remove_single_exception_chunk(s) < 0) |
1068 | goto shut; | |
1069 | ||
1e03f97e MP |
1070 | snapshot_merge_next_chunks(s); |
1071 | ||
1072 | return; | |
1073 | ||
1074 | shut: | |
9fe86254 | 1075 | down_write(&s->lock); |
d8ddb1cf | 1076 | s->merge_failed = 1; |
9fe86254 MP |
1077 | b = __release_queued_bios_after_merge(s); |
1078 | up_write(&s->lock); | |
1079 | error_bios(b); | |
1080 | ||
1e03f97e MP |
1081 | merge_shutdown(s); |
1082 | } | |
1083 | ||
1084 | static void start_merge(struct dm_snapshot *s) | |
1085 | { | |
1086 | if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits)) | |
1087 | snapshot_merge_next_chunks(s); | |
1088 | } | |
1089 | ||
1e03f97e MP |
1090 | /* |
1091 | * Stop the merging process and wait until it finishes. | |
1092 | */ | |
1093 | static void stop_merge(struct dm_snapshot *s) | |
1094 | { | |
1095 | set_bit(SHUTDOWN_MERGE, &s->state_bits); | |
74316201 | 1096 | wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE); |
1e03f97e MP |
1097 | clear_bit(SHUTDOWN_MERGE, &s->state_bits); |
1098 | } | |
1099 | ||
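start_merge() and stop_merge() coordinate entirely through two bits in state_bits: RUNNING_MERGE is claimed with test_and_set_bit() so only one merge pass runs at a time, SHUTDOWN_MERGE asks the running pass to stop, and stop_merge() then sleeps until merge_shutdown() clears RUNNING_MERGE and wakes the bit's waitqueue. A rough userspace analogue of that handshake, assuming C11 atomics plus a condition variable in place of test_and_set_bit()/wait_on_bit()/wake_up_bit(), with the merge pass itself stubbed out:

```c
/* Userspace analogue of the RUNNING_MERGE / SHUTDOWN_MERGE handshake. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool running_merge;
static atomic_bool shutdown_merge;
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t merge_stopped = PTHREAD_COND_INITIALIZER;

static void merge_next_chunks(void)
{
	while (!atomic_load(&shutdown_merge)) {
		/* ...copy one batch of chunks back to the origin... */
		break;			/* stub: pretend the merge finished */
	}

	/* merge_shutdown(): announce that this pass has stopped. */
	pthread_mutex_lock(&m);
	atomic_store(&running_merge, false);
	pthread_cond_broadcast(&merge_stopped);
	pthread_mutex_unlock(&m);
}

static void start_merge(void)
{
	/* Only one merge pass may run at a time. */
	if (!atomic_exchange(&running_merge, true))
		merge_next_chunks();
}

static void stop_merge(void)
{
	atomic_store(&shutdown_merge, true);	/* ask the pass to stop */
	pthread_mutex_lock(&m);
	while (atomic_load(&running_merge))
		pthread_cond_wait(&merge_stopped, &m);
	pthread_mutex_unlock(&m);
	atomic_store(&shutdown_merge, false);
}
```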
1da177e4 | 1100 | /* |
b0d3cc01 | 1101 | * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size> |
1da177e4 LT |
1102 | */ |
1103 | static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
1104 | { | |
1105 | struct dm_snapshot *s; | |
cd45daff | 1106 | int i; |
1da177e4 | 1107 | int r = -EINVAL; |
fc56f6fb | 1108 | char *origin_path, *cow_path; |
55a62eef | 1109 | unsigned args_used, num_flush_bios = 1; |
10b8106a | 1110 | fmode_t origin_mode = FMODE_READ; |
1da177e4 | 1111 | |
4c7e3bf4 | 1112 | if (argc != 4) { |
72d94861 | 1113 | ti->error = "requires exactly 4 arguments"; |
1da177e4 | 1114 | r = -EINVAL; |
fc56f6fb | 1115 | goto bad; |
1da177e4 LT |
1116 | } |
1117 | ||
10b8106a | 1118 | if (dm_target_is_snapshot_merge(ti)) { |
55a62eef | 1119 | num_flush_bios = 2; |
10b8106a MS |
1120 | origin_mode = FMODE_WRITE; |
1121 | } | |
1122 | ||
fc56f6fb MS |
1123 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
1124 | if (!s) { | |
a2d2b034 | 1125 | ti->error = "Cannot allocate private snapshot structure"; |
fc56f6fb MS |
1126 | r = -ENOMEM; |
1127 | goto bad; | |
1128 | } | |
1129 | ||
c2411045 MP |
1130 | origin_path = argv[0]; |
1131 | argv++; | |
1132 | argc--; | |
1133 | ||
1134 | r = dm_get_device(ti, origin_path, origin_mode, &s->origin); | |
1135 | if (r) { | |
1136 | ti->error = "Cannot get origin device"; | |
1137 | goto bad_origin; | |
1138 | } | |
1139 | ||
fc56f6fb MS |
1140 | cow_path = argv[0]; |
1141 | argv++; | |
1142 | argc--; | |
1143 | ||
024d37e9 | 1144 | r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); |
fc56f6fb MS |
1145 | if (r) { |
1146 | ti->error = "Cannot get COW device"; | |
1147 | goto bad_cow; | |
1148 | } | |
1149 | ||
1150 | r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); | |
fee1998e JB |
1151 | if (r) { |
1152 | ti->error = "Couldn't create exception store"; | |
1da177e4 | 1153 | r = -EINVAL; |
fc56f6fb | 1154 | goto bad_store; |
1da177e4 LT |
1155 | } |
1156 | ||
fee1998e JB |
1157 | argv += args_used; |
1158 | argc -= args_used; | |
1159 | ||
fc56f6fb | 1160 | s->ti = ti; |
1da177e4 | 1161 | s->valid = 1; |
76c44f6d | 1162 | s->snapshot_overflowed = 0; |
aa14edeb | 1163 | s->active = 0; |
879129d2 | 1164 | atomic_set(&s->pending_exceptions_count, 0); |
230c83af MP |
1165 | s->exception_start_sequence = 0; |
1166 | s->exception_complete_sequence = 0; | |
1167 | INIT_LIST_HEAD(&s->out_of_order_list); | |
1da177e4 | 1168 | init_rwsem(&s->lock); |
c1f0c183 | 1169 | INIT_LIST_HEAD(&s->list); |
ca3a931f | 1170 | spin_lock_init(&s->pe_lock); |
1e03f97e | 1171 | s->state_bits = 0; |
d8ddb1cf | 1172 | s->merge_failed = 0; |
9fe86254 MP |
1173 | s->first_merging_chunk = 0; |
1174 | s->num_merging_chunks = 0; | |
1175 | bio_list_init(&s->bios_queued_during_merge); | |
1da177e4 LT |
1176 | |
1177 | /* Allocate hash table for COW data */ | |
fee1998e | 1178 | if (init_hash_tables(s)) { |
1da177e4 LT |
1179 | ti->error = "Unable to allocate hash table space"; |
1180 | r = -ENOMEM; | |
fee1998e | 1181 | goto bad_hash_tables; |
1da177e4 LT |
1182 | } |
1183 | ||
df5d2e90 | 1184 | s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); |
fa34ce73 MP |
1185 | if (IS_ERR(s->kcopyd_client)) { |
1186 | r = PTR_ERR(s->kcopyd_client); | |
1da177e4 | 1187 | ti->error = "Could not create kcopyd client"; |
fee1998e | 1188 | goto bad_kcopyd; |
1da177e4 LT |
1189 | } |
1190 | ||
92e86812 MP |
1191 | s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); |
1192 | if (!s->pending_pool) { | |
1193 | ti->error = "Could not allocate mempool for pending exceptions"; | |
09e8b813 | 1194 | r = -ENOMEM; |
fee1998e | 1195 | goto bad_pending_pool; |
92e86812 MP |
1196 | } |
1197 | ||
cd45daff MP |
1198 | for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) |
1199 | INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); | |
1200 | ||
1201 | spin_lock_init(&s->tracked_chunk_lock); | |
1202 | ||
c1f0c183 | 1203 | ti->private = s; |
55a62eef | 1204 | ti->num_flush_bios = num_flush_bios; |
42bc954f | 1205 | ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk); |
c1f0c183 MS |
1206 | |
1207 | /* Add snapshot to the list of snapshots for this origin */ | |
1208 | /* Exceptions aren't triggered till snapshot_resume() is called */ | |
1209 | r = register_snapshot(s); | |
1210 | if (r == -ENOMEM) { | |
1211 | ti->error = "Snapshot origin struct allocation failed"; | |
1212 | goto bad_load_and_register; | |
1213 | } else if (r < 0) { | |
1214 | /* invalid handover, register_snapshot has set ti->error */ | |
1215 | goto bad_load_and_register; | |
1216 | } | |
1217 | ||
1218 | /* | |
1219 | * Metadata must only be loaded into one table at once, so skip this | |
1220 | * if metadata will be handed over during resume. | |
1221 | * Chunk size will be set during the handover - set it to zero to | |
1222 | * ensure it's ignored. | |
1223 | */ | |
1224 | if (r > 0) { | |
1225 | s->store->chunk_size = 0; | |
1226 | return 0; | |
1227 | } | |
1228 | ||
493df71c JB |
1229 | r = s->store->type->read_metadata(s->store, dm_add_exception, |
1230 | (void *)s); | |
0764147b | 1231 | if (r < 0) { |
f9cea4f7 | 1232 | ti->error = "Failed to read snapshot metadata"; |
c1f0c183 | 1233 | goto bad_read_metadata; |
0764147b MB |
1234 | } else if (r > 0) { |
1235 | s->valid = 0; | |
1236 | DMWARN("Snapshot is marked invalid."); | |
f9cea4f7 | 1237 | } |
aa14edeb | 1238 | |
3f2412dc MP |
1239 | if (!s->store->chunk_size) { |
1240 | ti->error = "Chunk size not set"; | |
c1f0c183 | 1241 | goto bad_read_metadata; |
1da177e4 | 1242 | } |
542f9038 MS |
1243 | |
1244 | r = dm_set_target_max_io_len(ti, s->store->chunk_size); | |
1245 | if (r) | |
1246 | goto bad_read_metadata; | |
1da177e4 LT |
1247 | |
1248 | return 0; | |
1249 | ||
c1f0c183 MS |
1250 | bad_read_metadata: |
1251 | unregister_snapshot(s); | |
1252 | ||
fee1998e | 1253 | bad_load_and_register: |
92e86812 MP |
1254 | mempool_destroy(s->pending_pool); |
1255 | ||
fee1998e | 1256 | bad_pending_pool: |
eb69aca5 | 1257 | dm_kcopyd_client_destroy(s->kcopyd_client); |
1da177e4 | 1258 | |
fee1998e | 1259 | bad_kcopyd: |
3510cb94 JB |
1260 | dm_exception_table_exit(&s->pending, pending_cache); |
1261 | dm_exception_table_exit(&s->complete, exception_cache); | |
1da177e4 | 1262 | |
fee1998e | 1263 | bad_hash_tables: |
fc56f6fb | 1264 | dm_exception_store_destroy(s->store); |
1da177e4 | 1265 | |
fc56f6fb MS |
1266 | bad_store: |
1267 | dm_put_device(ti, s->cow); | |
fee1998e | 1268 | |
fc56f6fb | 1269 | bad_cow: |
c2411045 MP |
1270 | dm_put_device(ti, s->origin); |
1271 | ||
1272 | bad_origin: | |
fc56f6fb MS |
1273 | kfree(s); |
1274 | ||
1275 | bad: | |
1da177e4 LT |
1276 | return r; |
1277 | } | |
1278 | ||
31c93a0c MB |
1279 | static void __free_exceptions(struct dm_snapshot *s) |
1280 | { | |
eb69aca5 | 1281 | dm_kcopyd_client_destroy(s->kcopyd_client); |
31c93a0c MB |
1282 | s->kcopyd_client = NULL; |
1283 | ||
3510cb94 JB |
1284 | dm_exception_table_exit(&s->pending, pending_cache); |
1285 | dm_exception_table_exit(&s->complete, exception_cache); | |
31c93a0c MB |
1286 | } |
1287 | ||
c1f0c183 MS |
1288 | static void __handover_exceptions(struct dm_snapshot *snap_src, |
1289 | struct dm_snapshot *snap_dest) | |
1290 | { | |
1291 | union { | |
1292 | struct dm_exception_table table_swap; | |
1293 | struct dm_exception_store *store_swap; | |
1294 | } u; | |
1295 | ||
1296 | /* | |
1297 | * Swap all snapshot context information between the two instances. | |
1298 | */ | |
1299 | u.table_swap = snap_dest->complete; | |
1300 | snap_dest->complete = snap_src->complete; | |
1301 | snap_src->complete = u.table_swap; | |
1302 | ||
1303 | u.store_swap = snap_dest->store; | |
1304 | snap_dest->store = snap_src->store; | |
b0d3cc01 | 1305 | snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow; |
c1f0c183 MS |
1306 | snap_src->store = u.store_swap; |
1307 | ||
1308 | snap_dest->store->snap = snap_dest; | |
1309 | snap_src->store->snap = snap_src; | |
1310 | ||
542f9038 | 1311 | snap_dest->ti->max_io_len = snap_dest->store->chunk_size; |
c1f0c183 | 1312 | snap_dest->valid = snap_src->valid; |
76c44f6d | 1313 | snap_dest->snapshot_overflowed = snap_src->snapshot_overflowed; |
c1f0c183 MS |
1314 | |
1315 | /* | |
1316 | * Set source invalid to ensure it receives no further I/O. | |
1317 | */ | |
1318 | snap_src->valid = 0; | |
1319 | } | |
1320 | ||
1da177e4 LT |
1321 | static void snapshot_dtr(struct dm_target *ti) |
1322 | { | |
cd45daff MP |
1323 | #ifdef CONFIG_DM_DEBUG |
1324 | int i; | |
1325 | #endif | |
028867ac | 1326 | struct dm_snapshot *s = ti->private; |
c1f0c183 | 1327 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; |
1da177e4 | 1328 | |
c1f0c183 MS |
1329 | down_read(&_origins_lock); |
1330 | /* Check whether exception handover must be cancelled */ | |
9d3b15c4 | 1331 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
c1f0c183 MS |
1332 | if (snap_src && snap_dest && (s == snap_src)) { |
1333 | down_write(&snap_dest->lock); | |
1334 | snap_dest->valid = 0; | |
1335 | up_write(&snap_dest->lock); | |
1336 | DMERR("Cancelling snapshot handover."); | |
1337 | } | |
1338 | up_read(&_origins_lock); | |
1339 | ||
1e03f97e MP |
1340 | if (dm_target_is_snapshot_merge(ti)) |
1341 | stop_merge(s); | |
1342 | ||
138728dc AK |
1343 | /* Prevent further origin writes from using this snapshot. */ |
1344 | /* After this returns there can be no new kcopyd jobs. */ | |
1da177e4 LT |
1345 | unregister_snapshot(s); |
1346 | ||
879129d2 | 1347 | while (atomic_read(&s->pending_exceptions_count)) |
90fa1527 | 1348 | msleep(1); |
879129d2 MP |
1349 | /* |
1350 | * Ensure instructions in mempool_destroy aren't reordered | |
1351 | * before atomic_read. | |
1352 | */ | |
1353 | smp_mb(); | |
1354 | ||
cd45daff MP |
1355 | #ifdef CONFIG_DM_DEBUG |
1356 | for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) | |
1357 | BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); | |
1358 | #endif | |
1359 | ||
31c93a0c | 1360 | __free_exceptions(s); |
1da177e4 | 1361 | |
92e86812 MP |
1362 | mempool_destroy(s->pending_pool); |
1363 | ||
fee1998e | 1364 | dm_exception_store_destroy(s->store); |
138728dc | 1365 | |
fc56f6fb MS |
1366 | dm_put_device(ti, s->cow); |
1367 | ||
c2411045 MP |
1368 | dm_put_device(ti, s->origin); |
1369 | ||
1da177e4 LT |
1370 | kfree(s); |
1371 | } | |
1372 | ||
1373 | /* | |
1374 | * Flush a list of buffers. | |
1375 | */ | |
1376 | static void flush_bios(struct bio *bio) | |
1377 | { | |
1378 | struct bio *n; | |
1379 | ||
1380 | while (bio) { | |
1381 | n = bio->bi_next; | |
1382 | bio->bi_next = NULL; | |
1383 | generic_make_request(bio); | |
1384 | bio = n; | |
1385 | } | |
1386 | } | |
1387 | ||
515ad66c MP |
1388 | static int do_origin(struct dm_dev *origin, struct bio *bio); |
1389 | ||
1390 | /* | |
1391 | * Flush a list of buffers. | |
1392 | */ | |
1393 | static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) | |
1394 | { | |
1395 | struct bio *n; | |
1396 | int r; | |
1397 | ||
1398 | while (bio) { | |
1399 | n = bio->bi_next; | |
1400 | bio->bi_next = NULL; | |
1401 | r = do_origin(s->origin, bio); | |
1402 | if (r == DM_MAPIO_REMAPPED) | |
1403 | generic_make_request(bio); | |
1404 | bio = n; | |
1405 | } | |
1406 | } | |
1407 | ||
1da177e4 LT |
1408 | /* |
1409 | * Error a list of buffers. | |
1410 | */ | |
1411 | static void error_bios(struct bio *bio) | |
1412 | { | |
1413 | struct bio *n; | |
1414 | ||
1415 | while (bio) { | |
1416 | n = bio->bi_next; | |
1417 | bio->bi_next = NULL; | |
6712ecf8 | 1418 | bio_io_error(bio); |
1da177e4 LT |
1419 | bio = n; |
1420 | } | |
1421 | } | |
1422 | ||
695368ac | 1423 | static void __invalidate_snapshot(struct dm_snapshot *s, int err) |
76df1c65 AK |
1424 | { |
1425 | if (!s->valid) | |
1426 | return; | |
1427 | ||
1428 | if (err == -EIO) | |
1429 | DMERR("Invalidating snapshot: Error reading/writing."); | |
1430 | else if (err == -ENOMEM) | |
1431 | DMERR("Invalidating snapshot: Unable to allocate exception."); | |
1432 | ||
493df71c JB |
1433 | if (s->store->type->drop_snapshot) |
1434 | s->store->type->drop_snapshot(s->store); | |
76df1c65 AK |
1435 | |
1436 | s->valid = 0; | |
1437 | ||
fc56f6fb | 1438 | dm_table_event(s->ti->table); |
76df1c65 AK |
1439 | } |
1440 | ||
028867ac | 1441 | static void pending_complete(struct dm_snap_pending_exception *pe, int success) |
1da177e4 | 1442 | { |
1d4989c8 | 1443 | struct dm_exception *e; |
1da177e4 | 1444 | struct dm_snapshot *s = pe->snap; |
9d493fa8 AK |
1445 | struct bio *origin_bios = NULL; |
1446 | struct bio *snapshot_bios = NULL; | |
a6e50b40 | 1447 | struct bio *full_bio = NULL; |
9d493fa8 | 1448 | int error = 0; |
1da177e4 | 1449 | |
76df1c65 AK |
1450 | if (!success) { |
1451 | /* Read/write error - snapshot is unusable */ | |
1da177e4 | 1452 | down_write(&s->lock); |
695368ac | 1453 | __invalidate_snapshot(s, -EIO); |
9d493fa8 | 1454 | error = 1; |
76df1c65 AK |
1455 | goto out; |
1456 | } | |
1457 | ||
119bc547 | 1458 | e = alloc_completed_exception(GFP_NOIO); |
76df1c65 | 1459 | if (!e) { |
1da177e4 | 1460 | down_write(&s->lock); |
695368ac | 1461 | __invalidate_snapshot(s, -ENOMEM); |
9d493fa8 | 1462 | error = 1; |
76df1c65 AK |
1463 | goto out; |
1464 | } | |
1465 | *e = pe->e; | |
1da177e4 | 1466 | |
76df1c65 AK |
1467 | down_write(&s->lock); |
1468 | if (!s->valid) { | |
3510cb94 | 1469 | free_completed_exception(e); |
9d493fa8 | 1470 | error = 1; |
76df1c65 | 1471 | goto out; |
1da177e4 LT |
1472 | } |
1473 | ||
615d1eb9 MS |
1474 | /* Check for conflicting reads */ |
1475 | __check_for_conflicting_io(s, pe->e.old_chunk); | |
a8d41b59 | 1476 | |
9d493fa8 AK |
1477 | /* |
1478 | * Add a proper exception, and remove the | |
1479 | * in-flight exception from the list. | |
1480 | */ | |
3510cb94 | 1481 | dm_insert_exception(&s->complete, e); |
76df1c65 | 1482 | |
a2d2b034 | 1483 | out: |
3510cb94 | 1484 | dm_remove_exception(&pe->e); |
9d493fa8 | 1485 | snapshot_bios = bio_list_get(&pe->snapshot_bios); |
515ad66c | 1486 | origin_bios = bio_list_get(&pe->origin_bios); |
a6e50b40 MP |
1487 | full_bio = pe->full_bio; |
1488 | if (full_bio) { | |
1489 | full_bio->bi_end_io = pe->full_bio_end_io; | |
1490 | full_bio->bi_private = pe->full_bio_private; | |
1491 | } | |
73dfd078 MP |
1492 | increment_pending_exceptions_done_count(); |
1493 | ||
9d493fa8 AK |
1494 | up_write(&s->lock); |
1495 | ||
1496 | /* Submit any pending write bios */ | |
a6e50b40 MP |
1497 | if (error) { |
1498 | if (full_bio) | |
1499 | bio_io_error(full_bio); | |
9d493fa8 | 1500 | error_bios(snapshot_bios); |
a6e50b40 MP |
1501 | } else { |
1502 | if (full_bio) | |
4246a0b6 | 1503 | bio_endio(full_bio); |
9d493fa8 | 1504 | flush_bios(snapshot_bios); |
a6e50b40 | 1505 | } |
9d493fa8 | 1506 | |
515ad66c | 1507 | retry_origin_bios(s, origin_bios); |
22aa66a3 MP |
1508 | |
1509 | free_pending_exception(pe); | |
1da177e4 LT |
1510 | } |
1511 | ||
1512 | static void commit_callback(void *context, int success) | |
1513 | { | |
028867ac AK |
1514 | struct dm_snap_pending_exception *pe = context; |
1515 | ||
1da177e4 LT |
1516 | pending_complete(pe, success); |
1517 | } | |
1518 | ||
230c83af MP |
1519 | static void complete_exception(struct dm_snap_pending_exception *pe) |
1520 | { | |
1521 | struct dm_snapshot *s = pe->snap; | |
1522 | ||
1523 | if (unlikely(pe->copy_error)) | |
1524 | pending_complete(pe, 0); | |
1525 | ||
1526 | else | |
1527 | /* Update the metadata if we are persistent */ | |
1528 | s->store->type->commit_exception(s->store, &pe->e, | |
1529 | commit_callback, pe); | |
1530 | } | |
1531 | ||
1da177e4 LT |
1532 | /* |
1533 | * Called when the copy I/O has finished. kcopyd actually runs | |
1534 | * this code so don't block. | |
1535 | */ | |
4cdc1d1f | 1536 | static void copy_callback(int read_err, unsigned long write_err, void *context) |
1da177e4 | 1537 | { |
028867ac | 1538 | struct dm_snap_pending_exception *pe = context; |
1da177e4 LT |
1539 | struct dm_snapshot *s = pe->snap; |
1540 | ||
230c83af | 1541 | pe->copy_error = read_err || write_err; |
1da177e4 | 1542 | |
230c83af MP |
1543 | if (pe->exception_sequence == s->exception_complete_sequence) { |
1544 | s->exception_complete_sequence++; | |
1545 | complete_exception(pe); | |
1546 | ||
1547 | while (!list_empty(&s->out_of_order_list)) { | |
1548 | pe = list_entry(s->out_of_order_list.next, | |
1549 | struct dm_snap_pending_exception, out_of_order_entry); | |
1550 | if (pe->exception_sequence != s->exception_complete_sequence) | |
1551 | break; | |
1552 | s->exception_complete_sequence++; | |
1553 | list_del(&pe->out_of_order_entry); | |
1554 | complete_exception(pe); | |
1555 | } | |
1556 | } else { | |
1557 | struct list_head *lh; | |
1558 | struct dm_snap_pending_exception *pe2; | |
1559 | ||
1560 | list_for_each_prev(lh, &s->out_of_order_list) { | |
1561 | pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry); | |
1562 | if (pe2->exception_sequence < pe->exception_sequence) | |
1563 | break; | |
1564 | } | |
1565 | list_add(&pe->out_of_order_entry, lh); | |
1566 | } | |
1da177e4 LT |
1567 | } |
1568 | ||
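copy_callback() makes exceptions complete in the order their copies were issued even though kcopyd may finish them out of order: each pending exception carries exception_sequence, an early finisher is parked on out_of_order_list (kept sorted by sequence), and once exception_complete_sequence catches up the parked entries are drained in order. A standalone, single-threaded sketch of that reordering, with a stub complete() standing in for committing the exception and resubmitting its bios:

```c
/* Sketch of sequence-ordered completion: copies can finish out of order,
 * but complete() must run in issue order.  Names are illustrative. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct pending {
	uint64_t seq;			/* like exception_sequence */
	struct pending *next;		/* parked list, ascending by seq */
};

static uint64_t next_to_complete;	/* like exception_complete_sequence */
static struct pending *parked;

static void complete(struct pending *pe)
{
	printf("completing %llu\n", (unsigned long long)pe->seq);
}

static void copy_done(struct pending *pe)
{
	if (pe->seq != next_to_complete) {
		/* Out of order: park it, keeping the list sorted by seq. */
		struct pending **pp = &parked;

		while (*pp && (*pp)->seq < pe->seq)
			pp = &(*pp)->next;
		pe->next = *pp;
		*pp = pe;
		return;
	}

	/* In order: complete it, then drain any now-contiguous parked entries. */
	next_to_complete++;
	complete(pe);
	while (parked && parked->seq == next_to_complete) {
		struct pending *p = parked;

		parked = p->next;
		next_to_complete++;
		complete(p);
	}
}

int main(void)
{
	struct pending a = { .seq = 1 }, b = { .seq = 2 }, c = { .seq = 0 };

	copy_done(&a);	/* parked */
	copy_done(&b);	/* parked behind a */
	copy_done(&c);	/* completes 0, then drains 1 and 2 in order */
	return 0;
}
```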
1569 | /* | |
1570 | * Dispatches the copy operation to kcopyd. | |
1571 | */ | |
028867ac | 1572 | static void start_copy(struct dm_snap_pending_exception *pe) |
1da177e4 LT |
1573 | { |
1574 | struct dm_snapshot *s = pe->snap; | |
22a1ceb1 | 1575 | struct dm_io_region src, dest; |
1da177e4 LT |
1576 | struct block_device *bdev = s->origin->bdev; |
1577 | sector_t dev_size; | |
1578 | ||
1579 | dev_size = get_dev_size(bdev); | |
1580 | ||
1581 | src.bdev = bdev; | |
71fab00a | 1582 | src.sector = chunk_to_sector(s->store, pe->e.old_chunk); |
df96eee6 | 1583 | src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); |
1da177e4 | 1584 | |
fc56f6fb | 1585 | dest.bdev = s->cow->bdev; |
71fab00a | 1586 | dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); |
1da177e4 LT |
1587 | dest.count = src.count; |
1588 | ||
1589 | /* Hand over to kcopyd */ | |
a2d2b034 | 1590 | dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); |
1da177e4 LT |
1591 | } |
1592 | ||
4246a0b6 | 1593 | static void full_bio_end_io(struct bio *bio) |
a6e50b40 MP |
1594 | { |
1595 | void *callback_data = bio->bi_private; | |
1596 | ||
4246a0b6 | 1597 | dm_kcopyd_do_callback(callback_data, 0, bio->bi_error ? 1 : 0); |
a6e50b40 MP |
1598 | } |
1599 | ||
1600 | static void start_full_bio(struct dm_snap_pending_exception *pe, | |
1601 | struct bio *bio) | |
1602 | { | |
1603 | struct dm_snapshot *s = pe->snap; | |
1604 | void *callback_data; | |
1605 | ||
1606 | pe->full_bio = bio; | |
1607 | pe->full_bio_end_io = bio->bi_end_io; | |
1608 | pe->full_bio_private = bio->bi_private; | |
1609 | ||
1610 | callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, | |
1611 | copy_callback, pe); | |
1612 | ||
1613 | bio->bi_end_io = full_bio_end_io; | |
1614 | bio->bi_private = callback_data; | |
1615 | ||
1616 | generic_make_request(bio); | |
1617 | } | |
1618 | ||
2913808e MP |
1619 | static struct dm_snap_pending_exception * |
1620 | __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) | |
1621 | { | |
3510cb94 | 1622 | struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); |
2913808e MP |
1623 | |
1624 | if (!e) | |
1625 | return NULL; | |
1626 | ||
1627 | return container_of(e, struct dm_snap_pending_exception, e); | |
1628 | } | |
1629 | ||
1da177e4 LT |
1630 | /* |
1631 | * Looks to see if this snapshot already has a pending exception | |
1632 | * for this chunk, otherwise it allocates a new one and inserts | |
1633 | * it into the pending table. | |
1634 | * | |
1635 | * NOTE: a write lock must be held on snap->lock before calling | |
1636 | * this. | |
1637 | */ | |
028867ac | 1638 | static struct dm_snap_pending_exception * |
c6621392 MP |
1639 | __find_pending_exception(struct dm_snapshot *s, |
1640 | struct dm_snap_pending_exception *pe, chunk_t chunk) | |
1da177e4 | 1641 | { |
c6621392 | 1642 | struct dm_snap_pending_exception *pe2; |
1da177e4 | 1643 | |
2913808e MP |
1644 | pe2 = __lookup_pending_exception(s, chunk); |
1645 | if (pe2) { | |
76df1c65 | 1646 | free_pending_exception(pe); |
2913808e | 1647 | return pe2; |
1da177e4 LT |
1648 | } |
1649 | ||
76df1c65 AK |
1650 | pe->e.old_chunk = chunk; |
1651 | bio_list_init(&pe->origin_bios); | |
1652 | bio_list_init(&pe->snapshot_bios); | |
76df1c65 | 1653 | pe->started = 0; |
a6e50b40 | 1654 | pe->full_bio = NULL; |
76df1c65 | 1655 | |
493df71c | 1656 | if (s->store->type->prepare_exception(s->store, &pe->e)) { |
76df1c65 AK |
1657 | free_pending_exception(pe); |
1658 | return NULL; | |
1659 | } | |
1660 | ||
230c83af MP |
1661 | pe->exception_sequence = s->exception_start_sequence++; |
1662 | ||
3510cb94 | 1663 | dm_insert_exception(&s->pending, &pe->e); |
76df1c65 | 1664 | |
1da177e4 LT |
1665 | return pe; |
1666 | } | |
1667 | ||
1d4989c8 | 1668 | static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, |
d74f81f8 | 1669 | struct bio *bio, chunk_t chunk) |
1da177e4 | 1670 | { |
fc56f6fb | 1671 | bio->bi_bdev = s->cow->bdev; |
4f024f37 KO |
1672 | bio->bi_iter.bi_sector = |
1673 | chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + | |
1674 | (chunk - e->old_chunk)) + | |
1675 | (bio->bi_iter.bi_sector & s->store->chunk_mask); | |
1da177e4 LT |
1676 | } |
1677 | ||
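/*
 * Map I/O sent to the snapshot device.  Chunks that already have a
 * completed exception are remapped to the COW device.  A write to a chunk
 * with no exception yet allocates a pending exception and is held back
 * (or, for a whole-chunk write, used as the copy itself) until the chunk
 * has been copied; reads of unremapped chunks are passed to the origin.
 */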
7de3ee57 | 1678 | static int snapshot_map(struct dm_target *ti, struct bio *bio) |
1da177e4 | 1679 | { |
1d4989c8 | 1680 | struct dm_exception *e; |
028867ac | 1681 | struct dm_snapshot *s = ti->private; |
d2a7ad29 | 1682 | int r = DM_MAPIO_REMAPPED; |
1da177e4 | 1683 | chunk_t chunk; |
028867ac | 1684 | struct dm_snap_pending_exception *pe = NULL; |
1da177e4 | 1685 | |
ee18026a MP |
1686 | init_tracked_chunk(bio); |
1687 | ||
d87f4c14 | 1688 | if (bio->bi_rw & REQ_FLUSH) { |
fc56f6fb | 1689 | bio->bi_bdev = s->cow->bdev; |
494b3ee7 MP |
1690 | return DM_MAPIO_REMAPPED; |
1691 | } | |
1692 | ||
4f024f37 | 1693 | chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); |
1da177e4 LT |
1694 | |
1695 | /* Full snapshots are not usable */ | |
76df1c65 | 1696 | /* To get here the table must be live so s->active is always set. */ |
1da177e4 | 1697 | if (!s->valid) |
f6a80ea8 | 1698 | return -EIO; |
1da177e4 | 1699 | |
ba40a2aa AK |
1700 | /* FIXME: should only take write lock if we need |
1701 | * to copy an exception */ | |
1702 | down_write(&s->lock); | |
1703 | ||
76c44f6d | 1704 | if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) { |
ba40a2aa AK |
1705 | r = -EIO; |
1706 | goto out_unlock; | |
1707 | } | |
1708 | ||
1709 | /* If the block is already remapped - use that, else remap it */ | |
3510cb94 | 1710 | e = dm_lookup_exception(&s->complete, chunk); |
ba40a2aa | 1711 | if (e) { |
d74f81f8 | 1712 | remap_exception(s, e, bio, chunk); |
ba40a2aa AK |
1713 | goto out_unlock; |
1714 | } | |
1715 | ||
1da177e4 LT |
1716 | /* |
1717 | * Write to snapshot - higher level takes care of RW/RO | |
1718 | * flags so we should only get this if we are | |
1719 | * writeable. | |
1720 | */ | |
1721 | if (bio_rw(bio) == WRITE) { | |
2913808e | 1722 | pe = __lookup_pending_exception(s, chunk); |
76df1c65 | 1723 | if (!pe) { |
c6621392 MP |
1724 | up_write(&s->lock); |
1725 | pe = alloc_pending_exception(s); | |
1726 | down_write(&s->lock); | |
1727 | ||
76c44f6d | 1728 | if (!s->valid || s->snapshot_overflowed) { |
c6621392 MP |
1729 | free_pending_exception(pe); |
1730 | r = -EIO; | |
1731 | goto out_unlock; | |
1732 | } | |
1733 | ||
3510cb94 | 1734 | e = dm_lookup_exception(&s->complete, chunk); |
35bf659b MP |
1735 | if (e) { |
1736 | free_pending_exception(pe); | |
1737 | remap_exception(s, e, bio, chunk); | |
1738 | goto out_unlock; | |
1739 | } | |
1740 | ||
c6621392 | 1741 | pe = __find_pending_exception(s, pe, chunk); |
2913808e | 1742 | if (!pe) { |
b0d3cc01 MS |
1743 | if (s->store->userspace_supports_overflow) { |
1744 | s->snapshot_overflowed = 1; | |
1745 | DMERR("Snapshot overflowed: Unable to allocate exception."); | |
1746 | } else | |
1747 | __invalidate_snapshot(s, -ENOMEM); | |
2913808e MP |
1748 | r = -EIO; |
1749 | goto out_unlock; | |
1750 | } | |
1da177e4 LT |
1751 | } |
1752 | ||
d74f81f8 | 1753 | remap_exception(s, &pe->e, bio, chunk); |
76df1c65 | 1754 | |
d2a7ad29 | 1755 | r = DM_MAPIO_SUBMITTED; |
ba40a2aa | 1756 | |
a6e50b40 | 1757 | if (!pe->started && |
4f024f37 KO |
1758 | bio->bi_iter.bi_size == |
1759 | (s->store->chunk_size << SECTOR_SHIFT)) { | |
a6e50b40 MP |
1760 | pe->started = 1; |
1761 | up_write(&s->lock); | |
1762 | start_full_bio(pe, bio); | |
1763 | goto out; | |
1764 | } | |
1765 | ||
1766 | bio_list_add(&pe->snapshot_bios, bio); | |
1767 | ||
76df1c65 AK |
1768 | if (!pe->started) { |
1769 | /* this is protected by snap->lock */ | |
1770 | pe->started = 1; | |
ba40a2aa | 1771 | up_write(&s->lock); |
76df1c65 | 1772 | start_copy(pe); |
ba40a2aa AK |
1773 | goto out; |
1774 | } | |
cd45daff | 1775 | } else { |
ba40a2aa | 1776 | bio->bi_bdev = s->origin->bdev; |
ee18026a | 1777 | track_chunk(s, bio, chunk); |
cd45daff | 1778 | } |
1da177e4 | 1779 | |
a2d2b034 | 1780 | out_unlock: |
ba40a2aa | 1781 | up_write(&s->lock); |
a2d2b034 | 1782 | out: |
1da177e4 LT |
1783 | return r; |
1784 | } | |
1785 | ||
3452c2a1 MP |
1786 | /* |
1787 | * A snapshot-merge target behaves like a combination of a snapshot | |
1788 | * target and a snapshot-origin target. It only generates new | |
1789 | * exceptions in other snapshots and not in the one that is being | |
1790 | * merged. | |
1791 | * | |
1792 | * For each chunk, if there is an existing exception, it is used to | |
1793 | * redirect I/O to the cow device. Otherwise I/O is sent to the origin, | |
1794 | * which in turn might generate exceptions in other snapshots. | |
9fe86254 MP |
1795 | * If merging is currently taking place on the chunk in question, the |
1796 | * I/O is deferred by adding it to s->bios_queued_during_merge. | |
3452c2a1 | 1797 | */ |
7de3ee57 | 1798 | static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) |
3452c2a1 MP |
1799 | { |
1800 | struct dm_exception *e; | |
1801 | struct dm_snapshot *s = ti->private; | |
1802 | int r = DM_MAPIO_REMAPPED; | |
1803 | chunk_t chunk; | |
1804 | ||
ee18026a MP |
1805 | init_tracked_chunk(bio); |
1806 | ||
d87f4c14 | 1807 | if (bio->bi_rw & REQ_FLUSH) { |
55a62eef | 1808 | if (!dm_bio_get_target_bio_nr(bio)) |
10b8106a MS |
1809 | bio->bi_bdev = s->origin->bdev; |
1810 | else | |
1811 | bio->bi_bdev = s->cow->bdev; | |
10b8106a MS |
1812 | return DM_MAPIO_REMAPPED; |
1813 | } | |
1814 | ||
4f024f37 | 1815 | chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); |
3452c2a1 | 1816 | |
9fe86254 | 1817 | down_write(&s->lock); |
3452c2a1 | 1818 | |
d2fdb776 MP |
1819 | /* Full merging snapshots are redirected to the origin */ |
1820 | if (!s->valid) | |
1821 | goto redirect_to_origin; | |
3452c2a1 MP |
1822 | |
1823 | /* If the block is already remapped - use that */ | |
1824 | e = dm_lookup_exception(&s->complete, chunk); | |
1825 | if (e) { | |
9fe86254 MP |
1826 | /* Queue writes overlapping with chunks being merged */ |
1827 | if (bio_rw(bio) == WRITE && | |
1828 | chunk >= s->first_merging_chunk && | |
1829 | chunk < (s->first_merging_chunk + | |
1830 | s->num_merging_chunks)) { | |
1831 | bio->bi_bdev = s->origin->bdev; | |
1832 | bio_list_add(&s->bios_queued_during_merge, bio); | |
1833 | r = DM_MAPIO_SUBMITTED; | |
1834 | goto out_unlock; | |
1835 | } | |
17aa0332 | 1836 | |
3452c2a1 | 1837 | remap_exception(s, e, bio, chunk); |
17aa0332 MP |
1838 | |
1839 | if (bio_rw(bio) == WRITE) | |
ee18026a | 1840 | track_chunk(s, bio, chunk); |
3452c2a1 MP |
1841 | goto out_unlock; |
1842 | } | |
1843 | ||
d2fdb776 | 1844 | redirect_to_origin: |
3452c2a1 MP |
1845 | bio->bi_bdev = s->origin->bdev; |
1846 | ||
1847 | if (bio_rw(bio) == WRITE) { | |
9fe86254 | 1848 | up_write(&s->lock); |
3452c2a1 MP |
1849 | return do_origin(s->origin, bio); |
1850 | } | |
1851 | ||
1852 | out_unlock: | |
9fe86254 | 1853 | up_write(&s->lock); |
3452c2a1 MP |
1854 | |
1855 | return r; | |
1856 | } | |
1857 | ||
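/*
 * Drop the tracked-chunk reference taken by snapshot_map() or
 * snapshot_merge_map() once the remapped bio has completed.
 */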
7de3ee57 | 1858 | static int snapshot_end_io(struct dm_target *ti, struct bio *bio, int error) |
cd45daff MP |
1859 | { |
1860 | struct dm_snapshot *s = ti->private; | |
cd45daff | 1861 | |
ee18026a MP |
1862 | if (is_bio_tracked(bio)) |
1863 | stop_tracking_chunk(s, bio); | |
cd45daff MP |
1864 | |
1865 | return 0; | |
1866 | } | |
1867 | ||
1e03f97e MP |
1868 | static void snapshot_merge_presuspend(struct dm_target *ti) |
1869 | { | |
1870 | struct dm_snapshot *s = ti->private; | |
1871 | ||
1872 | stop_merge(s); | |
1873 | } | |
1874 | ||
c1f0c183 MS |
1875 | static int snapshot_preresume(struct dm_target *ti) |
1876 | { | |
1877 | int r = 0; | |
1878 | struct dm_snapshot *s = ti->private; | |
1879 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | |
1880 | ||
1881 | down_read(&_origins_lock); | |
9d3b15c4 | 1882 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
c1f0c183 MS |
1883 | if (snap_src && snap_dest) { |
1884 | down_read(&snap_src->lock); | |
1885 | if (s == snap_src) { | |
1886 | DMERR("Unable to resume snapshot source until " | |
1887 | "handover completes."); | |
1888 | r = -EINVAL; | |
b83b2f29 | 1889 | } else if (!dm_suspended(snap_src->ti)) { |
c1f0c183 MS |
1890 | DMERR("Unable to perform snapshot handover until " |
1891 | "source is suspended."); | |
1892 | r = -EINVAL; | |
1893 | } | |
1894 | up_read(&snap_src->lock); | |
1895 | } | |
1896 | up_read(&_origins_lock); | |
1897 | ||
1898 | return r; | |
1899 | } | |
1900 | ||
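/*
 * If another snapshot target sharing this COW device is being replaced
 * (e.g. across a table reload), take over its exception tables here,
 * with the origin (or the merging snapshot) internally suspended around
 * the handover.
 */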
1da177e4 LT |
1901 | static void snapshot_resume(struct dm_target *ti) |
1902 | { | |
028867ac | 1903 | struct dm_snapshot *s = ti->private; |
09ee96b2 | 1904 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL, *snap_merging = NULL; |
b735fede MP |
1905 | struct dm_origin *o; |
1906 | struct mapped_device *origin_md = NULL; | |
09ee96b2 | 1907 | bool must_restart_merging = false; |
c1f0c183 MS |
1908 | |
1909 | down_read(&_origins_lock); | |
b735fede MP |
1910 | |
1911 | o = __lookup_dm_origin(s->origin->bdev); | |
1912 | if (o) | |
1913 | origin_md = dm_table_get_md(o->ti->table); | |
09ee96b2 MP |
1914 | if (!origin_md) { |
1915 | (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); | |
1916 | if (snap_merging) | |
1917 | origin_md = dm_table_get_md(snap_merging->ti->table); | |
1918 | } | |
b735fede MP |
1919 | if (origin_md == dm_table_get_md(ti->table)) |
1920 | origin_md = NULL; | |
09ee96b2 MP |
1921 | if (origin_md) { |
1922 | if (dm_hold(origin_md)) | |
1923 | origin_md = NULL; | |
1924 | } | |
b735fede | 1925 | |
09ee96b2 MP |
1926 | up_read(&_origins_lock); |
1927 | ||
1928 | if (origin_md) { | |
b735fede | 1929 | dm_internal_suspend_fast(origin_md); |
09ee96b2 MP |
1930 | if (snap_merging && test_bit(RUNNING_MERGE, &snap_merging->state_bits)) { |
1931 | must_restart_merging = true; | |
1932 | stop_merge(snap_merging); | |
1933 | } | |
1934 | } | |
1935 | ||
1936 | down_read(&_origins_lock); | |
b735fede | 1937 | |
9d3b15c4 | 1938 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
c1f0c183 MS |
1939 | if (snap_src && snap_dest) { |
1940 | down_write(&snap_src->lock); | |
1941 | down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING); | |
1942 | __handover_exceptions(snap_src, snap_dest); | |
1943 | up_write(&snap_dest->lock); | |
1944 | up_write(&snap_src->lock); | |
1945 | } | |
b735fede | 1946 | |
c1f0c183 MS |
1947 | up_read(&_origins_lock); |
1948 | ||
09ee96b2 MP |
1949 | if (origin_md) { |
1950 | if (must_restart_merging) | |
1951 | start_merge(snap_merging); | |
1952 | dm_internal_resume_fast(origin_md); | |
1953 | dm_put(origin_md); | |
1954 | } | |
1955 | ||
c1f0c183 MS |
1956 | /* Now we have correct chunk size, reregister */ |
1957 | reregister_snapshot(s); | |
1da177e4 | 1958 | |
aa14edeb AK |
1959 | down_write(&s->lock); |
1960 | s->active = 1; | |
1961 | up_write(&s->lock); | |
1da177e4 LT |
1962 | } |
1963 | ||
542f9038 | 1964 | static uint32_t get_origin_minimum_chunksize(struct block_device *bdev) |
1e03f97e | 1965 | { |
542f9038 | 1966 | uint32_t min_chunksize; |
1e03f97e MP |
1967 | |
1968 | down_read(&_origins_lock); | |
1969 | min_chunksize = __minimum_chunk_size(__lookup_origin(bdev)); | |
1970 | up_read(&_origins_lock); | |
1971 | ||
1972 | return min_chunksize; | |
1973 | } | |
1974 | ||
1975 | static void snapshot_merge_resume(struct dm_target *ti) | |
1976 | { | |
1977 | struct dm_snapshot *s = ti->private; | |
1978 | ||
1979 | /* | |
1980 | * Handover exceptions from existing snapshot. | |
1981 | */ | |
1982 | snapshot_resume(ti); | |
1983 | ||
1984 | /* | |
542f9038 | 1985 | * snapshot-merge acts as an origin, so set ti->max_io_len |
1e03f97e | 1986 | */ |
542f9038 | 1987 | ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev); |
1e03f97e MP |
1988 | |
1989 | start_merge(s); | |
1990 | } | |
1991 | ||
fd7c092e MP |
1992 | static void snapshot_status(struct dm_target *ti, status_type_t type, |
1993 | unsigned status_flags, char *result, unsigned maxlen) | |
1da177e4 | 1994 | { |
2e4a31df | 1995 | unsigned sz = 0; |
028867ac | 1996 | struct dm_snapshot *snap = ti->private; |
1da177e4 LT |
1997 | |
1998 | switch (type) { | |
1999 | case STATUSTYPE_INFO: | |
94e76572 MP |
2000 | |
2001 | down_write(&snap->lock); | |
2002 | ||
1da177e4 | 2003 | if (!snap->valid) |
2e4a31df | 2004 | DMEMIT("Invalid"); |
d8ddb1cf MS |
2005 | else if (snap->merge_failed) |
2006 | DMEMIT("Merge failed"); | |
76c44f6d MP |
2007 | else if (snap->snapshot_overflowed) |
2008 | DMEMIT("Overflow"); | |
1da177e4 | 2009 | else { |
985903bb MS |
2010 | if (snap->store->type->usage) { |
2011 | sector_t total_sectors, sectors_allocated, | |
2012 | metadata_sectors; | |
2013 | snap->store->type->usage(snap->store, | |
2014 | &total_sectors, | |
2015 | &sectors_allocated, | |
2016 | &metadata_sectors); | |
2017 | DMEMIT("%llu/%llu %llu", | |
2018 | (unsigned long long)sectors_allocated, | |
2019 | (unsigned long long)total_sectors, | |
2020 | (unsigned long long)metadata_sectors); | |
1da177e4 LT |
2021 | } |
2022 | else | |
2e4a31df | 2023 | DMEMIT("Unknown"); |
1da177e4 | 2024 | } |
94e76572 MP |
2025 | |
2026 | up_write(&snap->lock); | |
2027 | ||
1da177e4 LT |
2028 | break; |
2029 | ||
2030 | case STATUSTYPE_TABLE: | |
2031 | /* | |
2032 | * kdevname returns a static pointer so we need | |
2033 | * to make private copies if the output is to | |
2034 | * make sense. | |
2035 | */ | |
fc56f6fb | 2036 | DMEMIT("%s %s", snap->origin->name, snap->cow->name); |
1e302a92 JB |
2037 | snap->store->type->status(snap->store, type, result + sz, |
2038 | maxlen - sz); | |
1da177e4 LT |
2039 | break; |
2040 | } | |
1da177e4 LT |
2041 | } |
2042 | ||
8811f46c MS |
2043 | static int snapshot_iterate_devices(struct dm_target *ti, |
2044 | iterate_devices_callout_fn fn, void *data) | |
2045 | { | |
2046 | struct dm_snapshot *snap = ti->private; | |
1e5554c8 MP |
2047 | int r; |
2048 | ||
2049 | r = fn(ti, snap->origin, 0, ti->len, data); | |
8811f46c | 2050 | |
1e5554c8 MP |
2051 | if (!r) |
2052 | r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data); | |
2053 | ||
2054 | return r; | |
8811f46c MS |
2055 | } |
2056 | ||
2057 | ||
1da177e4 LT |
2058 | /*----------------------------------------------------------------- |
2059 | * Origin methods | |
2060 | *---------------------------------------------------------------*/ | |
9eaae8ff MP |
2061 | |
2062 | /* | |
2063 | * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any | |
2064 | * supplied bio was ignored. The caller may submit it immediately. | |
2065 | * (No remapping actually occurs as the origin is always a direct linear | |
2066 | * map.) | |
2067 | * | |
2068 | * If further exceptions are required, DM_MAPIO_SUBMITTED is returned | |
2069 | * and any supplied bio is added to a list to be submitted once all | |
2070 | * the necessary exceptions exist. | |
2071 | */ | |
2072 | static int __origin_write(struct list_head *snapshots, sector_t sector, | |
2073 | struct bio *bio) | |
1da177e4 | 2074 | { |
515ad66c | 2075 | int r = DM_MAPIO_REMAPPED; |
1da177e4 | 2076 | struct dm_snapshot *snap; |
1d4989c8 | 2077 | struct dm_exception *e; |
515ad66c MP |
2078 | struct dm_snap_pending_exception *pe; |
2079 | struct dm_snap_pending_exception *pe_to_start_now = NULL; | |
2080 | struct dm_snap_pending_exception *pe_to_start_last = NULL; | |
1da177e4 LT |
2081 | chunk_t chunk; |
2082 | ||
2083 | /* Do all the snapshots on this origin */ | |
2084 | list_for_each_entry (snap, snapshots, list) { | |
3452c2a1 MP |
2085 | /* |
2086 | * Don't make new exceptions in a merging snapshot | |
2087 | * because it has effectively been deleted | |
2088 | */ | |
2089 | if (dm_target_is_snapshot_merge(snap->ti)) | |
2090 | continue; | |
2091 | ||
76df1c65 AK |
2092 | down_write(&snap->lock); |
2093 | ||
aa14edeb AK |
2094 | /* Only deal with valid and active snapshots */ |
2095 | if (!snap->valid || !snap->active) | |
76df1c65 | 2096 | goto next_snapshot; |
1da177e4 | 2097 | |
d5e404c1 | 2098 | /* Nothing to do if writing beyond end of snapshot */ |
9eaae8ff | 2099 | if (sector >= dm_table_get_size(snap->ti->table)) |
76df1c65 | 2100 | goto next_snapshot; |
1da177e4 LT |
2101 | |
2102 | /* | |
2103 | * Remember, different snapshots can have | |
2104 | * different chunk sizes. | |
2105 | */ | |
9eaae8ff | 2106 | chunk = sector_to_chunk(snap->store, sector); |
1da177e4 LT |
2107 | |
2108 | /* | |
2109 | * Check exception table to see if block | |
2110 | * is already remapped in this snapshot | |
2111 | * and trigger an exception if not. | |
2112 | */ | |
3510cb94 | 2113 | e = dm_lookup_exception(&snap->complete, chunk); |
76df1c65 AK |
2114 | if (e) |
2115 | goto next_snapshot; | |
2116 | ||
2913808e | 2117 | pe = __lookup_pending_exception(snap, chunk); |
76df1c65 | 2118 | if (!pe) { |
c6621392 MP |
2119 | up_write(&snap->lock); |
2120 | pe = alloc_pending_exception(snap); | |
2121 | down_write(&snap->lock); | |
2122 | ||
2123 | if (!snap->valid) { | |
2124 | free_pending_exception(pe); | |
2125 | goto next_snapshot; | |
2126 | } | |
2127 | ||
3510cb94 | 2128 | e = dm_lookup_exception(&snap->complete, chunk); |
35bf659b MP |
2129 | if (e) { |
2130 | free_pending_exception(pe); | |
2131 | goto next_snapshot; | |
2132 | } | |
2133 | ||
c6621392 | 2134 | pe = __find_pending_exception(snap, pe, chunk); |
2913808e MP |
2135 | if (!pe) { |
2136 | __invalidate_snapshot(snap, -ENOMEM); | |
2137 | goto next_snapshot; | |
2138 | } | |
76df1c65 AK |
2139 | } |
2140 | ||
515ad66c | 2141 | r = DM_MAPIO_SUBMITTED; |
76df1c65 | 2142 | |
515ad66c MP |
2143 | /* |
2144 | * If an origin bio was supplied, queue it to wait for the | |
2145 | * completion of this exception, and start this one last, | |
2146 | * at the end of the function. | |
2147 | */ | |
2148 | if (bio) { | |
2149 | bio_list_add(&pe->origin_bios, bio); | |
2150 | bio = NULL; | |
76df1c65 | 2151 | |
515ad66c MP |
2152 | if (!pe->started) { |
2153 | pe->started = 1; | |
2154 | pe_to_start_last = pe; | |
2155 | } | |
76df1c65 AK |
2156 | } |
2157 | ||
2158 | if (!pe->started) { | |
2159 | pe->started = 1; | |
515ad66c | 2160 | pe_to_start_now = pe; |
1da177e4 LT |
2161 | } |
2162 | ||
a2d2b034 | 2163 | next_snapshot: |
1da177e4 | 2164 | up_write(&snap->lock); |
1da177e4 | 2165 | |
515ad66c MP |
2166 | if (pe_to_start_now) { |
2167 | start_copy(pe_to_start_now); | |
2168 | pe_to_start_now = NULL; | |
2169 | } | |
b4b610f6 AK |
2170 | } |
2171 | ||
1da177e4 | 2172 | /* |
515ad66c MP |
2173 | * Submit the exception against which the bio is queued last, |
2174 | * to give the other exceptions a head start. | |
1da177e4 | 2175 | */ |
515ad66c MP |
2176 | if (pe_to_start_last) |
2177 | start_copy(pe_to_start_last); | |
1da177e4 LT |
2178 | |
2179 | return r; | |
2180 | } | |
2181 | ||
2182 | /* | |
2183 | * Called on a write from the origin driver. | |
2184 | */ | |
2185 | static int do_origin(struct dm_dev *origin, struct bio *bio) | |
2186 | { | |
2187 | struct origin *o; | |
d2a7ad29 | 2188 | int r = DM_MAPIO_REMAPPED; |
1da177e4 LT |
2189 | |
2190 | down_read(&_origins_lock); | |
2191 | o = __lookup_origin(origin->bdev); | |
2192 | if (o) | |
4f024f37 | 2193 | r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); |
1da177e4 LT |
2194 | up_read(&_origins_lock); |
2195 | ||
2196 | return r; | |
2197 | } | |
2198 | ||
73dfd078 MP |
2199 | /* |
2200 | * Trigger exceptions in all non-merging snapshots. | |
2201 | * | |
2202 | * The chunk size of the merging snapshot may be larger than the chunk | |
2203 | * size of some other snapshot so we may need to reallocate multiple | |
2204 | * chunks in other snapshots. | |
2205 | * | |
2206 | * We scan all the overlapping exceptions in the other snapshots. | |
2207 | * Returns 1 if anything was reallocated and must be waited for, | |
2208 | * otherwise returns 0. | |
2209 | * | |
2210 | * size must be a multiple of merging_snap's chunk_size. | |
2211 | */ | |
2212 | static int origin_write_extent(struct dm_snapshot *merging_snap, | |
2213 | sector_t sector, unsigned size) | |
2214 | { | |
2215 | int must_wait = 0; | |
2216 | sector_t n; | |
2217 | struct origin *o; | |
2218 | ||
2219 | /* | |
542f9038 | 2220 | * The origin's __minimum_chunk_size() got stored in max_io_len |
73dfd078 MP |
2221 | * by snapshot_merge_resume(). |
2222 | */ | |
2223 | down_read(&_origins_lock); | |
2224 | o = __lookup_origin(merging_snap->origin->bdev); | |
542f9038 | 2225 | for (n = 0; n < size; n += merging_snap->ti->max_io_len) |
73dfd078 MP |
2226 | if (__origin_write(&o->snapshots, sector + n, NULL) == |
2227 | DM_MAPIO_SUBMITTED) | |
2228 | must_wait = 1; | |
2229 | up_read(&_origins_lock); | |
2230 | ||
2231 | return must_wait; | |
2232 | } | |
2233 | ||
1da177e4 LT |
2234 | /* |
2235 | * Origin: maps a linear range of a device, with hooks for snapshotting. | |
2236 | */ | |
2237 | ||
2238 | /* | |
2239 | * Construct an origin mapping: <dev_path> | |
2240 | * The context for an origin is merely a 'struct dm_dev *' | |
2241 | * pointing to the real device. | |
2242 | */ | |
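/*
 * Illustrative example (the device name and size are made up): a dmsetup
 * table line for this target could look like
 *
 *     0 2097152 snapshot-origin /dev/vg0/base
 *
 * i.e. a 1 GiB (2097152-sector) linear view of the origin volume.
 */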
2243 | static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
2244 | { | |
2245 | int r; | |
599cdf3b | 2246 | struct dm_origin *o; |
1da177e4 LT |
2247 | |
2248 | if (argc != 1) { | |
72d94861 | 2249 | ti->error = "origin: incorrect number of arguments"; |
1da177e4 LT |
2250 | return -EINVAL; |
2251 | } | |
2252 | ||
599cdf3b MP |
2253 | o = kmalloc(sizeof(struct dm_origin), GFP_KERNEL); |
2254 | if (!o) { | |
2255 | ti->error = "Cannot allocate private origin structure"; | |
2256 | r = -ENOMEM; | |
2257 | goto bad_alloc; | |
2258 | } | |
2259 | ||
2260 | r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &o->dev); | |
1da177e4 LT |
2261 | if (r) { |
2262 | ti->error = "Cannot get target device"; | |
599cdf3b | 2263 | goto bad_open; |
1da177e4 LT |
2264 | } |
2265 | ||
b735fede | 2266 | o->ti = ti; |
599cdf3b | 2267 | ti->private = o; |
55a62eef | 2268 | ti->num_flush_bios = 1; |
494b3ee7 | 2269 | |
1da177e4 | 2270 | return 0; |
599cdf3b MP |
2271 | |
2272 | bad_open: | |
2273 | kfree(o); | |
2274 | bad_alloc: | |
2275 | return r; | |
1da177e4 LT |
2276 | } |
2277 | ||
2278 | static void origin_dtr(struct dm_target *ti) | |
2279 | { | |
599cdf3b | 2280 | struct dm_origin *o = ti->private; |
b735fede | 2281 | |
599cdf3b MP |
2282 | dm_put_device(ti, o->dev); |
2283 | kfree(o); | |
1da177e4 LT |
2284 | } |
2285 | ||
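/*
 * Writes to the origin must not straddle a chunk boundary, so a bio is
 * trimmed to o->split_boundary (the smallest chunk size of the attached
 * snapshots, set in origin_resume()) and then handed to do_origin() so
 * that any needed exceptions are copied before the write proceeds.
 */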
7de3ee57 | 2286 | static int origin_map(struct dm_target *ti, struct bio *bio) |
1da177e4 | 2287 | { |
599cdf3b | 2288 | struct dm_origin *o = ti->private; |
298eaa89 | 2289 | unsigned available_sectors; |
1da177e4 | 2290 | |
599cdf3b | 2291 | bio->bi_bdev = o->dev->bdev; |
1da177e4 | 2292 | |
298eaa89 | 2293 | if (unlikely(bio->bi_rw & REQ_FLUSH)) |
494b3ee7 MP |
2294 | return DM_MAPIO_REMAPPED; |
2295 | ||
298eaa89 | 2296 | if (bio_rw(bio) != WRITE) |
494b3ee7 MP |
2297 | return DM_MAPIO_REMAPPED; |
2298 | ||
298eaa89 MP |
2299 | available_sectors = o->split_boundary - |
2300 | ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1)); | |
2301 | ||
2302 | if (bio_sectors(bio) > available_sectors) | |
2303 | dm_accept_partial_bio(bio, available_sectors); | |
2304 | ||
1da177e4 | 2305 | /* Only tell snapshots if this is a write */ |
298eaa89 | 2306 | return do_origin(o->dev, bio); |
1da177e4 LT |
2307 | } |
2308 | ||
1da177e4 | 2309 | /* |
542f9038 | 2310 | * Set the target "max_io_len" field to the minimum of all the snapshots' |
1da177e4 LT |
2311 | * chunk sizes. |
2312 | */ | |
2313 | static void origin_resume(struct dm_target *ti) | |
2314 | { | |
599cdf3b | 2315 | struct dm_origin *o = ti->private; |
1da177e4 | 2316 | |
298eaa89 | 2317 | o->split_boundary = get_origin_minimum_chunksize(o->dev->bdev); |
b735fede MP |
2318 | |
2319 | down_write(&_origins_lock); | |
2320 | __insert_dm_origin(o); | |
2321 | up_write(&_origins_lock); | |
2322 | } | |
2323 | ||
2324 | static void origin_postsuspend(struct dm_target *ti) | |
2325 | { | |
2326 | struct dm_origin *o = ti->private; | |
2327 | ||
2328 | down_write(&_origins_lock); | |
2329 | __remove_dm_origin(o); | |
2330 | up_write(&_origins_lock); | |
1da177e4 LT |
2331 | } |
2332 | ||
fd7c092e MP |
2333 | static void origin_status(struct dm_target *ti, status_type_t type, |
2334 | unsigned status_flags, char *result, unsigned maxlen) | |
1da177e4 | 2335 | { |
599cdf3b | 2336 | struct dm_origin *o = ti->private; |
1da177e4 LT |
2337 | |
2338 | switch (type) { | |
2339 | case STATUSTYPE_INFO: | |
2340 | result[0] = '\0'; | |
2341 | break; | |
2342 | ||
2343 | case STATUSTYPE_TABLE: | |
599cdf3b | 2344 | snprintf(result, maxlen, "%s", o->dev->name); |
1da177e4 LT |
2345 | break; |
2346 | } | |
1da177e4 LT |
2347 | } |
2348 | ||
8811f46c MS |
2349 | static int origin_iterate_devices(struct dm_target *ti, |
2350 | iterate_devices_callout_fn fn, void *data) | |
2351 | { | |
599cdf3b | 2352 | struct dm_origin *o = ti->private; |
8811f46c | 2353 | |
599cdf3b | 2354 | return fn(ti, o->dev, 0, ti->len, data); |
8811f46c MS |
2355 | } |
2356 | ||
1da177e4 LT |
2357 | static struct target_type origin_target = { |
2358 | .name = "snapshot-origin", | |
b735fede | 2359 | .version = {1, 9, 0}, |
1da177e4 LT |
2360 | .module = THIS_MODULE, |
2361 | .ctr = origin_ctr, | |
2362 | .dtr = origin_dtr, | |
2363 | .map = origin_map, | |
2364 | .resume = origin_resume, | |
b735fede | 2365 | .postsuspend = origin_postsuspend, |
1da177e4 | 2366 | .status = origin_status, |
8811f46c | 2367 | .iterate_devices = origin_iterate_devices, |
1da177e4 LT |
2368 | }; |
2369 | ||
2370 | static struct target_type snapshot_target = { | |
2371 | .name = "snapshot", | |
b0d3cc01 | 2372 | .version = {1, 15, 0}, |
1da177e4 LT |
2373 | .module = THIS_MODULE, |
2374 | .ctr = snapshot_ctr, | |
2375 | .dtr = snapshot_dtr, | |
2376 | .map = snapshot_map, | |
cd45daff | 2377 | .end_io = snapshot_end_io, |
c1f0c183 | 2378 | .preresume = snapshot_preresume, |
1da177e4 LT |
2379 | .resume = snapshot_resume, |
2380 | .status = snapshot_status, | |
8811f46c | 2381 | .iterate_devices = snapshot_iterate_devices, |
1da177e4 LT |
2382 | }; |
2383 | ||
d698aa45 MP |
2384 | static struct target_type merge_target = { |
2385 | .name = dm_snapshot_merge_target_name, | |
b0d3cc01 | 2386 | .version = {1, 4, 0}, |
d698aa45 MP |
2387 | .module = THIS_MODULE, |
2388 | .ctr = snapshot_ctr, | |
2389 | .dtr = snapshot_dtr, | |
3452c2a1 | 2390 | .map = snapshot_merge_map, |
d698aa45 | 2391 | .end_io = snapshot_end_io, |
1e03f97e | 2392 | .presuspend = snapshot_merge_presuspend, |
d698aa45 | 2393 | .preresume = snapshot_preresume, |
1e03f97e | 2394 | .resume = snapshot_merge_resume, |
d698aa45 MP |
2395 | .status = snapshot_status, |
2396 | .iterate_devices = snapshot_iterate_devices, | |
2397 | }; | |
2398 | ||
1da177e4 LT |
2399 | static int __init dm_snapshot_init(void) |
2400 | { | |
2401 | int r; | |
2402 | ||
4db6bfe0 AK |
2403 | r = dm_exception_store_init(); |
2404 | if (r) { | |
2405 | DMERR("Failed to initialize exception stores"); | |
2406 | return r; | |
2407 | } | |
2408 | ||
1da177e4 | 2409 | r = dm_register_target(&snapshot_target); |
d698aa45 | 2410 | if (r < 0) { |
1da177e4 | 2411 | DMERR("snapshot target register failed %d", r); |
034a186d | 2412 | goto bad_register_snapshot_target; |
1da177e4 LT |
2413 | } |
2414 | ||
2415 | r = dm_register_target(&origin_target); | |
2416 | if (r < 0) { | |
72d94861 | 2417 | DMERR("Origin target register failed %d", r); |
d698aa45 MP |
2418 | goto bad_register_origin_target; |
2419 | } | |
2420 | ||
2421 | r = dm_register_target(&merge_target); | |
2422 | if (r < 0) { | |
2423 | DMERR("Merge target register failed %d", r); | |
2424 | goto bad_register_merge_target; | |
1da177e4 LT |
2425 | } |
2426 | ||
2427 | r = init_origin_hash(); | |
2428 | if (r) { | |
2429 | DMERR("init_origin_hash failed."); | |
d698aa45 | 2430 | goto bad_origin_hash; |
1da177e4 LT |
2431 | } |
2432 | ||
1d4989c8 | 2433 | exception_cache = KMEM_CACHE(dm_exception, 0); |
1da177e4 LT |
2434 | if (!exception_cache) { |
2435 | DMERR("Couldn't create exception cache."); | |
2436 | r = -ENOMEM; | |
d698aa45 | 2437 | goto bad_exception_cache; |
1da177e4 LT |
2438 | } |
2439 | ||
028867ac | 2440 | pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); |
1da177e4 LT |
2441 | if (!pending_cache) { |
2442 | DMERR("Couldn't create pending cache."); | |
2443 | r = -ENOMEM; | |
d698aa45 | 2444 | goto bad_pending_cache; |
1da177e4 LT |
2445 | } |
2446 | ||
1da177e4 LT |
2447 | return 0; |
2448 | ||
d698aa45 | 2449 | bad_pending_cache: |
1da177e4 | 2450 | kmem_cache_destroy(exception_cache); |
d698aa45 | 2451 | bad_exception_cache: |
1da177e4 | 2452 | exit_origin_hash(); |
d698aa45 MP |
2453 | bad_origin_hash: |
2454 | dm_unregister_target(&merge_target); | |
2455 | bad_register_merge_target: | |
1da177e4 | 2456 | dm_unregister_target(&origin_target); |
d698aa45 | 2457 | bad_register_origin_target: |
1da177e4 | 2458 | dm_unregister_target(&snapshot_target); |
034a186d JB |
2459 | bad_register_snapshot_target: |
2460 | dm_exception_store_exit(); | |
d698aa45 | 2461 | |
1da177e4 LT |
2462 | return r; |
2463 | } | |
2464 | ||
2465 | static void __exit dm_snapshot_exit(void) | |
2466 | { | |
10d3bd09 MP |
2467 | dm_unregister_target(&snapshot_target); |
2468 | dm_unregister_target(&origin_target); | |
d698aa45 | 2469 | dm_unregister_target(&merge_target); |
1da177e4 LT |
2470 | |
2471 | exit_origin_hash(); | |
1da177e4 LT |
2472 | kmem_cache_destroy(pending_cache); |
2473 | kmem_cache_destroy(exception_cache); | |
4db6bfe0 AK |
2474 | |
2475 | dm_exception_store_exit(); | |
1da177e4 LT |
2476 | } |
2477 | ||
2478 | /* Module hooks */ | |
2479 | module_init(dm_snapshot_init); | |
2480 | module_exit(dm_snapshot_exit); | |
2481 | ||
2482 | MODULE_DESCRIPTION(DM_NAME " snapshot target"); | |
2483 | MODULE_AUTHOR("Joe Thornber"); | |
2484 | MODULE_LICENSE("GPL"); | |
23cb2109 MP |
2485 | MODULE_ALIAS("dm-snapshot-origin"); |
2486 | MODULE_ALIAS("dm-snapshot-merge"); |