/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include "dm-bufio.h"

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

#define DM_PREFETCH_CHUNKS		12

/*-----------------------------------------------------------------
 * Persistent snapshots; by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk-sized blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with disk versions different from the kernel's will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
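 *
 * Illustrative example, assuming the default 16KB chunk and the
 * 16-byte on-disk exception record defined below (i.e. 1024
 * exceptions per area): chunk 0 holds the header, chunk 1 the
 * first metadata area, chunks 2-1025 the data chunks it maps,
 * chunk 1026 the second metadata area, and so on.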
 */

/*
 * Magic for persistent snapshots: "SnAp" - feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

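/*
 * An on-disk exception is a pair of little-endian 64-bit chunk
 * numbers, 16 bytes in total; exceptions_per_area is derived from
 * this record size in persistent_read_metadata() below.
 */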
struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top-level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	if (ps->area)
		vfree(ps->area);
	ps->area = NULL;

	if (ps->zero_area)
		vfree(ps->zero_area);
	ps->zero_area = NULL;

	if (ps->header_area)
		vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
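 * Callers doing header I/O (read_header() and write_header() below)
 * pass metadata == 1 so the request is pushed through
 * ps->metadata_wq; all other callers pass 0 and issue it directly.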
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
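 * With the illustrative 1024 exceptions per area worked out above,
 * areas 0, 1, 2, ... map to chunks 1, 1026, 2051, ...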
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

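/*
 * Step ps->next_free past a chunk that is reserved for metadata;
 * e.g. with a stride of 1025, chunks 1, 1026, 2051, ... hold
 * metadata areas and must never be handed out as data chunks.
 */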
static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;
	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, ps->area, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use the default chunk size (or logical_block_size, if larger)
	 * if none was supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If new_chunk is pointing at the start of the COW
		 * device, where the first metadata area is, we know
		 * that we've hit the end of the exceptions.
		 * Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL);

	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Set up for one current buffer + desired readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

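		/*
		 * Prefetch the metadata areas up to DM_PREFETCH_CHUNKS
		 * ahead of the current one, so the dm_bufio_read()
		 * below usually finds its buffer already in memory.
		 */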
		if (DM_PREFETCH_CHUNKS) do {
			chunk_t pf_chunk = area_location(ps, prefetch_area);
			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
				break;
			dm_bufio_prefetch(client, pf_chunk, 1);
			prefetch_area++;
			if (unlikely(!prefetch_area))
				break;
		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (unlikely(IS_ERR(area))) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
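	 * For example, with the default 32-sector chunk and two full
	 * areas (ps->current_area == 1), metadata occupies
	 * (1 + 1 + 1) * 32 = 96 sectors.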
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}
	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * Metadata is valid, but the snapshot is invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
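	 * The flush + FUA write ensures both the copied-over data and
	 * this metadata area reach stable storage before the completion
	 * callbacks below are run.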
	 */
	if (ps->valid && area_io(ps, WRITE_FLUSH_FUA))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
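	 * For example, if the last three committed exceptions map old
	 * chunks 8, 9, 10 to new chunks 21, 22, 23, all three can be
	 * merged back in one go and we return 3.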
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive, &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, WRITE_FLUSH_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" P %llu", (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}