dm thin metadata: rename init_pmd to __create_persistent_data_objects
drivers/md/dm-thin-metadata.c
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"

#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>

/*--------------------------------------------------------------------------
 * As far as the metadata goes, there is:
 *
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   atomic writes.
 *
 * - A space map managing the metadata blocks.
 *
 * - A space map managing the data blocks.
 *
 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 *
 * - A hierarchical btree, with 2 levels, which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits, and the block in the top
 *   40 bits.
 *
 * BTrees consist solely of btree_nodes, each of which fills a block.  Some
 * are internal nodes, so their values are __le64 pointers to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  The nodes consist of the header,
 * followed by an array of keys, followed by an array of values.  We have
 * to binary search on the keys, so they're all held together to help the
 * cpu cache.
 *
 * Space maps have 2 btrees:
 *
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and has some details about how many free entries there
 *   are etc.
 *
 * - The bitmap blocks have a header (for the checksum).  Then the rest
 *   of the block is pairs of bits, with the meaning being:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t ref
 *   count.
 *
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has one single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  With a small data block size such as 64k the
 * metadata supports data devices that are hundreds of terabytes.
 *
 * The space maps allocate space linearly from front to back.  Space that
 * is freed in a transaction is never recycled within that transaction.
 * To try to avoid fragmenting _free_ space the allocator always goes
 * back and fills in gaps.
 *
 * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
 * from the block manager.
 *--------------------------------------------------------------------------*/
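
/*
 * To make the pair-of-bits encoding above concrete, here is a minimal,
 * illustrative sketch of how one such entry could be decoded.  This helper
 * is not used by the metadata code - the real decoding lives in the
 * persistent-data space map implementation - and the word layout assumed
 * here (32 two-bit entries per little-endian 64-bit word) is an assumption
 * made for the example only.
 */
static inline unsigned __example_bitmap_ref_count_hint(const __le64 *words,
						       unsigned index)
{
	uint64_t w = le64_to_cpu(words[index >> 5]);	/* 32 entries per word */

	/* 0, 1 or 2 is the exact count; 3 means consult the overflow btree. */
	return (unsigned) (w >> ((index & 31) * 2)) & 3;
}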

#define DM_MSG_PREFIX   "thin metadata"

#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 1
#define THIN_METADATA_CACHE_SIZE 64
#define SECTOR_TO_BLOCK_SHIFT 3
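
/*
 * Worked example of the shift above: with 512-byte sectors, one metadata
 * block is 1 << SECTOR_TO_BLOCK_SHIFT = 8 sectors, i.e. 8 * 512 = 4096
 * bytes, matching THIN_METADATA_BLOCK_SIZE as defined in
 * dm-thin-metadata.h (assumed here to be 4k).
 */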

/*
 * 3 for btree insert +
 * 2 for btree lookup used within space map
 */
#define THIN_MAX_CONCURRENT_LOCKS 5

/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128

/*
 * Little endian on-disk superblock and device details.
 */
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */
	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];
	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
	 */
	__le64 data_mapping_root;

	/*
	 * Device detail root mapping dev_id -> device_details
	 */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;

struct disk_device_details {
	__le64 mapped_blocks;
	__le64 transaction_id;	/* When created. */
	__le32 creation_time;
	__le32 snapshotted_time;
} __packed;

struct dm_pool_metadata {
	struct hlist_node hash;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_space_map *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_transaction_manager *nb_tm;

	/*
	 * Two-level btree.
	 * First level holds thin_dev_t.
	 * Second level holds mappings.
	 */
	struct dm_btree_info info;

	/*
	 * Non-blocking version of the above.
	 */
	struct dm_btree_info nb_info;

	/*
	 * Just the top level for deleting whole devices.
	 */
	struct dm_btree_info tl_info;

	/*
	 * Just the bottom level for creating new devices.
	 */
	struct dm_btree_info bl_info;

	/*
	 * Describes the device details btree.
	 */
	struct dm_btree_info details_info;

	struct rw_semaphore root_lock;
	uint32_t time;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;
};

struct dm_thin_device {
	struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	int changed;
	uint64_t mapped_blocks;
	uint64_t transaction_id;
	uint32_t creation_time;
	uint32_t snapshotted_time;
};

/*----------------------------------------------------------------
 * superblock validator
 *--------------------------------------------------------------*/

#define SUPERBLOCK_CSUM_XOR 160774

static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}

static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
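
/*
 * Note: the block manager drives these hooks itself - check() runs when
 * the superblock is first read into the cache, and prepare_for_write()
 * runs as dirty blocks are written back - so the locking helpers further
 * down never need to checksum by hand.
 */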

/*----------------------------------------------------------------
 * Methods for the btree value types
 *--------------------------------------------------------------*/

static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}

static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
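
/*
 * Illustrative only (this helper is not used by the code below): with
 * b = 0x1234 and t = 7, pack_block_time() yields 0x1234000007 - the
 * block in the top 40 bits, the time in the low 24 - and
 * unpack_block_time() reverses it exactly.
 */
static inline int __example_block_time_roundtrip(void)
{
	dm_block_t b;
	uint32_t t;

	unpack_block_time(pack_block_time(0x1234, 7), &b, &t);
	return b == 0x1234 && t == 7;	/* always 1 */
}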

static void data_block_inc(void *context, void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_inc_block(sm, b);
}

static void data_block_dec(void *context, void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_dec_block(sm, b);
}

static int data_block_equal(void *context, void *value1_le, void *value2_le)
{
	__le64 v1_le, v2_le;
	uint64_t b1, b2;
	uint32_t t;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);

	return b1 == b2;
}

static void subtree_inc(void *context, void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	dm_tm_inc(info->tm, root);
}

static void subtree_dec(void *context, void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	if (dm_btree_del(info, root))
		DMERR("btree delete failed");
}

static int subtree_equal(void *context, void *value1_le, void *value2_le)
{
	__le64 v1_le, v2_le;
	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));

	return v1_le == v2_le;
}

/*----------------------------------------------------------------*/

static int superblock_lock_zero(struct dm_pool_metadata *pmd,
				struct dm_block **sblock)
{
	return dm_bm_write_lock_zero(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				     &sb_validator, sblock);
}

static int superblock_lock(struct dm_pool_metadata *pmd,
			   struct dm_block **sblock)
{
	return dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
				&sb_validator, sblock);
}

static int superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
	int r;
	unsigned i;
	struct dm_block *b;
	__le64 *data_le, zero = cpu_to_le64(0);
	unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);

	/*
	 * We can't use a validator here - it may be all zeroes.
	 */
	r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
	if (r)
		return r;

	data_le = dm_block_data(b);
	*result = 1;
	for (i = 0; i < block_size; i++) {
		if (data_le[i] != zero) {
			*result = 0;
			break;
		}
	}

	return dm_bm_unlock(b);
}

static void __setup_btree_details(struct dm_pool_metadata *pmd)
{
	pmd->info.tm = pmd->tm;
	pmd->info.levels = 2;
	pmd->info.value_type.context = pmd->data_sm;
	pmd->info.value_type.size = sizeof(__le64);
	pmd->info.value_type.inc = data_block_inc;
	pmd->info.value_type.dec = data_block_dec;
	pmd->info.value_type.equal = data_block_equal;

	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
	pmd->nb_info.tm = pmd->nb_tm;

	pmd->tl_info.tm = pmd->tm;
	pmd->tl_info.levels = 1;
	pmd->tl_info.value_type.context = &pmd->info;
	pmd->tl_info.value_type.size = sizeof(__le64);
	pmd->tl_info.value_type.inc = subtree_inc;
	pmd->tl_info.value_type.dec = subtree_dec;
	pmd->tl_info.value_type.equal = subtree_equal;

	pmd->bl_info.tm = pmd->tm;
	pmd->bl_info.levels = 1;
	pmd->bl_info.value_type.context = pmd->data_sm;
	pmd->bl_info.value_type.size = sizeof(__le64);
	pmd->bl_info.value_type.inc = data_block_inc;
	pmd->bl_info.value_type.dec = data_block_dec;
	pmd->bl_info.value_type.equal = data_block_equal;

	pmd->details_info.tm = pmd->tm;
	pmd->details_info.levels = 1;
	pmd->details_info.value_type.context = NULL;
	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
	pmd->details_info.value_type.inc = NULL;
	pmd->details_info.value_type.dec = NULL;
	pmd->details_info.value_type.equal = NULL;
}

static int __create_persistent_data_objects(struct dm_pool_metadata *pmd,
					    struct dm_block_manager *bm,
					    dm_block_t nr_blocks, int create)
{
	int r;
	struct dm_space_map *sm, *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_block *sblock;

	if (create) {
		r = dm_tm_create_with_sm(bm, THIN_SUPERBLOCK_LOCATION, &tm, &sm);
		if (r < 0) {
			DMERR("tm_create_with_sm failed");
			return r;
		}

		data_sm = dm_sm_disk_create(tm, nr_blocks);
		if (IS_ERR(data_sm)) {
			DMERR("sm_disk_create failed");
			r = PTR_ERR(data_sm);
			goto bad;
		}
	} else {
		struct thin_disk_superblock *disk_super;

		r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION,
				    &sb_validator, &sblock);
		if (r < 0) {
			DMERR("couldn't read superblock");
			return r;
		}

		disk_super = dm_block_data(sblock);
		r = dm_tm_open_with_sm(bm, THIN_SUPERBLOCK_LOCATION,
				       disk_super->metadata_space_map_root,
				       sizeof(disk_super->metadata_space_map_root),
				       &tm, &sm);
		if (r < 0) {
			DMERR("tm_open_with_sm failed");
			dm_bm_unlock(sblock);
			return r;
		}

		data_sm = dm_sm_disk_open(tm, disk_super->data_space_map_root,
					  sizeof(disk_super->data_space_map_root));
		if (IS_ERR(data_sm)) {
			DMERR("sm_disk_open failed");
			dm_bm_unlock(sblock);
			r = PTR_ERR(data_sm);
			goto bad;
		}

		dm_bm_unlock(sblock);
	}

	pmd->bm = bm;
	pmd->metadata_sm = sm;
	pmd->data_sm = data_sm;
	pmd->tm = tm;
	pmd->nb_tm = dm_tm_create_non_blocking_clone(tm);
	if (!pmd->nb_tm) {
		DMERR("could not create clone tm");
		r = -ENOMEM;
		goto bad_data_sm;
	}

	__setup_btree_details(pmd);
	pmd->root = 0;

	init_rwsem(&pmd->root_lock);
	pmd->time = 0;
	pmd->details_root = 0;
	pmd->trans_id = 0;
	pmd->flags = 0;
	INIT_LIST_HEAD(&pmd->thin_devices);

	return 0;

bad_data_sm:
	dm_sm_destroy(data_sm);
bad:
	dm_tm_destroy(tm);
	dm_sm_destroy(sm);

	return r;
}
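
/*
 * Usage sketch: dm_pool_metadata_open() below probes whether the
 * superblock is still zeroed and then calls
 *
 *	__create_persistent_data_objects(pmd, bm, 0, create);
 *
 * passing create = 1 to format fresh metadata (the data space map starts
 * at zero blocks and is grown later via dm_pool_resize_data_dev()) or
 * create = 0 to reopen existing metadata from the space map roots held
 * in the superblock.
 */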

static int __begin_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	u32 features;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	pmd->time = le32_to_cpu(disk_super->time);
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
	pmd->flags = le32_to_cpu(disk_super->flags);
	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);

	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to "
		      "unsupported optional features (%lx).",
		      (unsigned long)features);
		r = -EINVAL;
		goto out;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (get_disk_ro(pmd->bdev->bd_disk))
		goto out;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to "
		      "unsupported optional features (%lx).",
		      (unsigned long)features);
		r = -EINVAL;
	}

out:
	dm_bm_unlock(sblock);
	return r;
}

static int __write_changed_details(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_thin_device *td, *tmp;
	struct disk_device_details details;
	uint64_t key;

	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (!td->changed)
			continue;

		key = td->id;

		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
		details.transaction_id = cpu_to_le64(td->transaction_id);
		details.creation_time = cpu_to_le32(td->creation_time);
		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
		__dm_bless_for_disk(&details);

		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
				    &key, &details, &pmd->details_root);
		if (r)
			return r;

		if (td->open_count)
			td->changed = 0;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}

	return 0;
}

static int __commit_transaction(struct dm_pool_metadata *pmd)
{
	/*
	 * FIXME: Associated pool should be made read-only on failure.
	 */
	int r;
	size_t metadata_len, data_len;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);

	r = __write_changed_details(pmd);
	if (r < 0)
		return r;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &data_len);
	if (r < 0)
		return r;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->time = cpu_to_le32(pmd->time);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
	disk_super->flags = cpu_to_le32(pmd->flags);

	r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
			    metadata_len);
	if (r < 0)
		goto out_locked;

	r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
			    data_len);
	if (r < 0)
		goto out_locked;

	return dm_tm_commit(pmd->tm, sblock);

out_locked:
	dm_bm_unlock(sblock);
	return r;
}

struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_pool_metadata *pmd;
	sector_t bdev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	struct dm_block_manager *bm;
	int create;
	struct dm_block *sblock;

	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
	if (!pmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}

	bm = dm_block_manager_create(bdev, THIN_METADATA_BLOCK_SIZE,
				     THIN_METADATA_CACHE_SIZE,
				     THIN_MAX_CONCURRENT_LOCKS);
	if (IS_ERR(bm)) {
		r = PTR_ERR(bm);
		DMERR("could not create block manager");
		kfree(pmd);
		return ERR_PTR(r);
	}

	r = superblock_all_zeroes(bm, &create);
	if (r) {
		dm_block_manager_destroy(bm);
		kfree(pmd);
		return ERR_PTR(r);
	}

	r = __create_persistent_data_objects(pmd, bm, 0, create);
	if (r) {
		dm_block_manager_destroy(bm);
		kfree(pmd);
		return ERR_PTR(r);
	}
	pmd->bdev = bdev;

	if (!create) {
		r = __begin_transaction(pmd);
		if (r < 0)
			goto bad;
		return pmd;
	}

	/*
	 * Create.
	 */
	r = superblock_lock_zero(pmd, &sblock);
	if (r)
		goto bad;

	if (bdev_size > THIN_METADATA_MAX_SECTORS)
		bdev_size = THIN_METADATA_MAX_SECTORS;

	disk_super = dm_block_data(sblock);
	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(THIN_VERSION);
	disk_super->time = 0;
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(data_block_size);

	r = dm_bm_unlock(sblock);
	if (r < 0)
		goto bad;

	r = dm_btree_empty(&pmd->info, &pmd->root);
	if (r < 0)
		goto bad;

	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
	if (r < 0) {
		DMERR("couldn't create devices root");
		goto bad;
	}

	pmd->flags = 0;
	r = dm_pool_commit_metadata(pmd);
	if (r < 0) {
		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
		      __func__, r);
		goto bad;
	}

	return pmd;

bad:
	if (dm_pool_metadata_close(pmd) < 0)
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
	return ERR_PTR(r);
}

int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
	int r;
	unsigned open_devices = 0;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->open_count)
			open_devices++;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}
	up_read(&pmd->root_lock);

	if (open_devices) {
		DMERR("attempt to close pmd when %u device(s) are still open",
		      open_devices);
		return -EBUSY;
	}

	r = __commit_transaction(pmd);
	if (r < 0)
		DMWARN("%s: __commit_transaction() failed, error = %d",
		       __func__, r);

	dm_tm_destroy(pmd->tm);
	dm_tm_destroy(pmd->nb_tm);
	dm_block_manager_destroy(pmd->bm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_sm_destroy(pmd->data_sm);
	kfree(pmd);

	return 0;
}

/*
 * __open_device: Returns @td corresponding to device with id @dev,
 * creating it if @create is set and incrementing @td->open_count.
 * On failure, @td is undefined.
 */
static int __open_device(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, int create,
			 struct dm_thin_device **td)
{
	int r, changed = 0;
	struct dm_thin_device *td2;
	uint64_t key = dev;
	struct disk_device_details details_le;

	/*
	 * If the device is already open, return it.
	 */
	list_for_each_entry(td2, &pmd->thin_devices, list)
		if (td2->id == dev) {
			/*
			 * May not create an already-open device.
			 */
			if (create)
				return -EEXIST;

			td2->open_count++;
			*td = td2;
			return 0;
		}

	/*
	 * Check the device exists.
	 */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (r) {
		if (r != -ENODATA || !create)
			return r;

		/*
		 * Create new device.
		 */
		changed = 1;
		details_le.mapped_blocks = 0;
		details_le.transaction_id = cpu_to_le64(pmd->trans_id);
		details_le.creation_time = cpu_to_le32(pmd->time);
		details_le.snapshotted_time = cpu_to_le32(pmd->time);
	}

	*td = kmalloc(sizeof(**td), GFP_NOIO);
	if (!*td)
		return -ENOMEM;

	(*td)->pmd = pmd;
	(*td)->id = dev;
	(*td)->open_count = 1;
	(*td)->changed = changed;
	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);

	list_add(&(*td)->list, &pmd->thin_devices);

	return 0;
}

static void __close_device(struct dm_thin_device *td)
{
	--td->open_count;
}

static int __create_thin(struct dm_pool_metadata *pmd,
			 dm_thin_id dev)
{
	int r;
	dm_block_t dev_root;
	uint64_t key = dev;
	struct disk_device_details details_le;
	struct dm_thin_device *td;
	__le64 value;

	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (!r)
		return -EEXIST;

	/*
	 * Create an empty btree for the mappings.
	 */
	r = dm_btree_empty(&pmd->bl_info, &dev_root);
	if (r)
		return r;

	/*
	 * Insert it into the main mapping tree.
	 */
	value = cpu_to_le64(dev_root);
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}

	r = __open_device(pmd, dev, 1, &td);
	if (r) {
		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}
	__close_device(td);

	return r;
}

int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;

	down_write(&pmd->root_lock);
	r = __create_thin(pmd, dev);
	up_write(&pmd->root_lock);

	return r;
}

static int __set_snapshot_details(struct dm_pool_metadata *pmd,
				  struct dm_thin_device *snap,
				  dm_thin_id origin, uint32_t time)
{
	int r;
	struct dm_thin_device *td;

	r = __open_device(pmd, origin, 0, &td);
	if (r)
		return r;

	td->changed = 1;
	td->snapshotted_time = time;

	snap->mapped_blocks = td->mapped_blocks;
	snap->snapshotted_time = time;
	__close_device(td);

	return 0;
}

static int __create_snap(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, dm_thin_id origin)
{
	int r;
	dm_block_t origin_root;
	uint64_t key = origin, dev_key = dev;
	struct dm_thin_device *td;
	struct disk_device_details details_le;
	__le64 value;

	/* check this device is unused */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &dev_key, &details_le);
	if (!r)
		return -EEXIST;

	/* find the mapping tree for the origin */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
	if (r)
		return r;
	origin_root = le64_to_cpu(value);

	/* clone the origin, an inc will do */
	dm_tm_inc(pmd->tm, origin_root);

	/* insert into the main mapping tree */
	value = cpu_to_le64(origin_root);
	__dm_bless_for_disk(&value);
	key = dev;
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_tm_dec(pmd->tm, origin_root);
		return r;
	}

	pmd->time++;

	r = __open_device(pmd, dev, 1, &td);
	if (r)
		goto bad;

	r = __set_snapshot_details(pmd, td, origin, pmd->time);
	__close_device(td);

	if (r)
		goto bad;

	return 0;

bad:
	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	dm_btree_remove(&pmd->details_info, pmd->details_root,
			&key, &pmd->details_root);
	return r;
}

int dm_pool_create_snap(struct dm_pool_metadata *pmd,
			dm_thin_id dev,
			dm_thin_id origin)
{
	int r;

	down_write(&pmd->root_lock);
	r = __create_snap(pmd, dev, origin);
	up_write(&pmd->root_lock);

	return r;
}

static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;
	uint64_t key = dev;
	struct dm_thin_device *td;

	/* TODO: failure should mark the transaction invalid */
	r = __open_device(pmd, dev, 0, &td);
	if (r)
		return r;

	if (td->open_count > 1) {
		__close_device(td);
		return -EBUSY;
	}

	list_del(&td->list);
	kfree(td);
	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
			    &key, &pmd->details_root);
	if (r)
		return r;

	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	if (r)
		return r;

	return 0;
}

int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev)
{
	int r;

	down_write(&pmd->root_lock);
	r = __delete_device(pmd, dev);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id)
{
	down_write(&pmd->root_lock);
	if (pmd->trans_id != current_id) {
		up_write(&pmd->root_lock);
		DMERR("mismatched transaction id");
		return -EINVAL;
	}

	pmd->trans_id = new_id;
	up_write(&pmd->root_lock);

	return 0;
}

int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result)
{
	down_read(&pmd->root_lock);
	*result = pmd->trans_id;
	up_read(&pmd->root_lock);

	return 0;
}

static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r, inc;
	struct thin_disk_superblock *disk_super;
	struct dm_block *copy, *sblock;
	dm_block_t held_root;

	/*
	 * Copy the superblock.
	 */
	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);
	if (r)
		return r;

	BUG_ON(!inc);

	held_root = dm_block_location(copy);
	disk_super = dm_block_data(copy);

	if (le64_to_cpu(disk_super->held_root)) {
		DMWARN("Pool metadata snapshot already exists: release this before taking another.");

		dm_tm_dec(pmd->tm, held_root);
		dm_tm_unlock(pmd->tm, copy);
		return -EBUSY;
	}

	/*
	 * Wipe the spacemap since we're not publishing this.
	 */
	memset(&disk_super->data_space_map_root, 0,
	       sizeof(disk_super->data_space_map_root));
	memset(&disk_super->metadata_space_map_root, 0,
	       sizeof(disk_super->metadata_space_map_root));

	/*
	 * Increment the data structures that need to be preserved.
	 */
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
	dm_tm_unlock(pmd->tm, copy);

	/*
	 * Write the held root into the superblock.
	 */
	r = superblock_lock(pmd, &sblock);
	if (r) {
		dm_tm_dec(pmd->tm, held_root);
		return r;
	}

	disk_super = dm_block_data(sblock);
	disk_super->held_root = cpu_to_le64(held_root);
	dm_bm_unlock(sblock);
	return 0;
}

int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;

	down_write(&pmd->root_lock);
	r = __reserve_metadata_snap(pmd);
	up_write(&pmd->root_lock);

	return r;
}

static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock, *copy;
	dm_block_t held_root;

	r = superblock_lock(pmd, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	held_root = le64_to_cpu(disk_super->held_root);
	disk_super->held_root = cpu_to_le64(0);

	dm_bm_unlock(sblock);

	if (!held_root) {
		DMWARN("No pool metadata snapshot found: nothing to release.");
		return -EINVAL;
	}

	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
	if (r)
		return r;

	disk_super = dm_block_data(copy);
	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
	dm_sm_dec_block(pmd->metadata_sm, held_root);

	return dm_tm_unlock(pmd->tm, copy);
}

int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;

	down_write(&pmd->root_lock);
	r = __release_metadata_snap(pmd);
	up_write(&pmd->root_lock);

	return r;
}
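
/*
 * These reserve/release entry points are normally driven from userspace
 * through the pool target's message interface, e.g. (assuming a pool
 * mapped as "pool"):
 *
 *	dmsetup message /dev/mapper/pool 0 reserve_metadata_snap
 *	dmsetup message /dev/mapper/pool 0 release_metadata_snap
 *
 * The held root can then be read back with dm_pool_get_metadata_snap()
 * below.
 */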

static int __get_metadata_snap(struct dm_pool_metadata *pmd,
			       dm_block_t *result)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	*result = le64_to_cpu(disk_super->held_root);

	return dm_bm_unlock(sblock);
}

int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = __get_metadata_snap(pmd, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td)
{
	int r;

	down_write(&pmd->root_lock);
	r = __open_device(pmd, dev, 0, td);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_close_thin_device(struct dm_thin_device *td)
{
	down_write(&td->pmd->root_lock);
	__close_device(td);
	up_write(&td->pmd->root_lock);

	return 0;
}

dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
{
	return td->id;
}

static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
	return td->snapshotted_time > time;
}

int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_block, struct dm_thin_lookup_result *result)
{
	int r;
	uint64_t block_time = 0;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	if (can_block) {
		down_read(&pmd->root_lock);
		r = dm_btree_lookup(&pmd->info, pmd->root, keys, &value);
		if (!r)
			block_time = le64_to_cpu(value);
		up_read(&pmd->root_lock);

	} else if (down_read_trylock(&pmd->root_lock)) {
		r = dm_btree_lookup(&pmd->nb_info, pmd->root, keys, &value);
		if (!r)
			block_time = le64_to_cpu(value);
		up_read(&pmd->root_lock);

	} else
		return -EWOULDBLOCK;

	if (!r) {
		dm_block_t exception_block;
		uint32_t exception_time;
		unpack_block_time(block_time, &exception_block,
				  &exception_time);
		result->block = exception_block;
		result->shared = __snapshotted_since(td, exception_time);
	}

	return r;
}

static int __insert(struct dm_thin_device *td, dm_block_t block,
		    dm_block_t data_block)
{
	int r, inserted;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	value = cpu_to_le64(pack_block_time(data_block, pmd->time));
	__dm_bless_for_disk(&value);

	r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
				   &pmd->root, &inserted);
	if (r)
		return r;

	if (inserted) {
		td->mapped_blocks++;
		td->changed = 1;
	}

	return 0;
}

int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block)
{
	int r;

	down_write(&td->pmd->root_lock);
	r = __insert(td, block, data_block);
	up_write(&td->pmd->root_lock);

	return r;
}

static int __remove(struct dm_thin_device *td, dm_block_t block)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
	if (r)
		return r;

	td->mapped_blocks--;
	td->changed = 1;

	return 0;
}

int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
	int r;

	down_write(&td->pmd->root_lock);
	r = __remove(td, block);
	up_write(&td->pmd->root_lock);

	return r;
}

int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r;

	down_write(&pmd->root_lock);
	r = dm_sm_new_block(pmd->data_sm, result);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
	int r;

	down_write(&pmd->root_lock);

	r = __commit_transaction(pmd);
	if (r < 0)
		goto out;

	/*
	 * Open the next transaction.
	 */
	r = __begin_transaction(pmd);
out:
	up_write(&pmd->root_lock);
	return r;
}

int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_free(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					  dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_free(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result)
{
	down_read(&pmd->root_lock);
	*result = pmd->data_block_size;
	up_read(&pmd->root_lock);

	return 0;
}

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_blocks(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	*result = td->mapped_blocks;
	up_read(&pmd->root_lock);

	return 0;
}

static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
{
	int r;
	__le64 value_le;
	dm_block_t thin_root;
	struct dm_pool_metadata *pmd = td->pmd;

	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
	if (r)
		return r;

	thin_root = le64_to_cpu(value_le);

	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
}

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *result)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	r = __highest_block(td, result);
	up_read(&pmd->root_lock);

	return r;
}

static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r;
	dm_block_t old_count;

	r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
	if (r)
		return r;

	if (new_count == old_count)
		return 0;

	if (new_count < old_count) {
		DMERR("cannot reduce size of data device");
		return -EINVAL;
	}

	return dm_sm_extend(pmd->data_sm, new_count - old_count);
}

int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r;

	down_write(&pmd->root_lock);
	r = __resize_data_dev(pmd, new_count);
	up_write(&pmd->root_lock);

	return r;
}