dm thin metadata: introduce THIN_MAX_CONCURRENT_LOCKS
drivers/md/dm-thin-metadata.c
/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "persistent-data/dm-btree.h"
#include "persistent-data/dm-space-map.h"
#include "persistent-data/dm-space-map-disk.h"
#include "persistent-data/dm-transaction-manager.h"

#include <linux/list.h>
#include <linux/device-mapper.h>
#include <linux/workqueue.h>

/*--------------------------------------------------------------------------
 * As far as the metadata goes, there is:
 *
 * - A superblock in block zero, taking up fewer than 512 bytes for
 *   atomic writes.
 *
 * - A space map managing the metadata blocks.
 *
 * - A space map managing the data blocks.
 *
 * - A btree mapping our internal thin dev ids onto struct disk_device_details.
 *
 * - A hierarchical btree, with 2 levels, which effectively maps (thin
 *   dev id, virtual block) -> block_time.  Block time is a 64-bit
 *   field holding the time in the low 24 bits, and block in the top 40
 *   bits.
 *
 * BTrees consist solely of btree_nodes, each of which fills a block.  Some
 * are internal nodes, so their values are __le64s pointing to other
 * nodes.  Leaf nodes can store data of any reasonable size (i.e. much
 * smaller than the block size).  The nodes consist of the header,
 * followed by an array of keys, followed by an array of values.  We have
 * to binary search on the keys, so they're all held together to help the
 * cpu cache.
 *
 * Space maps have 2 btrees:
 *
 * - One maps a uint64_t onto a struct index_entry, which points to a
 *   bitmap block and holds details such as how many free entries there
 *   are.
 *
 * - The bitmap blocks have a header (for the checksum).  Then the rest
 *   of the block is pairs of bits, with the meaning being:
 *
 *   0 - ref count is 0
 *   1 - ref count is 1
 *   2 - ref count is 2
 *   3 - ref count is higher than 2
 *
 * - If the count is higher than 2 then the ref count is entered in a
 *   second btree that directly maps the block_address to a uint32_t ref
 *   count.
 *
 * The space map metadata variant doesn't have a bitmaps btree.  Instead
 * it has one single block's worth of index_entries.  This avoids
 * recursive issues with the bitmap btree needing to allocate space in
 * order to insert.  Even with a small data block size such as 64k, the
 * metadata can support data devices that are hundreds of terabytes in
 * size.
 *
 * The space maps allocate space linearly from front to back.  Space that
 * is freed in a transaction is never recycled within that transaction.
 * To try and avoid fragmenting _free_ space the allocator always goes
 * back and fills in gaps.
 *
 * All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
 * from the block manager.
 *--------------------------------------------------------------------------*/

#define DM_MSG_PREFIX "thin metadata"

#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
#define THIN_VERSION 1
#define THIN_METADATA_CACHE_SIZE 64
#define SECTOR_TO_BLOCK_SHIFT 3

/*
 * 3 for btree insert +
 * 2 for btree lookup used within space map
 */
#define THIN_MAX_CONCURRENT_LOCKS 5
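/*
 * THIN_MAX_CONCURRENT_LOCKS is handed to dm_block_manager_create() in
 * dm_pool_metadata_open() below; it bounds how many metadata blocks this
 * client can hold read/write locked at any one time.
 */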

/* This should be plenty */
#define SPACE_MAP_ROOT_SIZE 128

/*
 * Little endian on-disk superblock and device details.
 */
struct thin_disk_superblock {
	__le32 csum;	/* Checksum of superblock except for this field. */
	__le32 flags;
	__le64 blocknr;	/* This block number, dm_block_t. */

	__u8 uuid[16];
	__le64 magic;
	__le32 version;
	__le32 time;

	__le64 trans_id;

	/*
	 * Root held by userspace transactions.
	 */
	__le64 held_root;

	__u8 data_space_map_root[SPACE_MAP_ROOT_SIZE];
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * 2-level btree mapping (dev_id, (dev block, time)) -> data block
	 */
	__le64 data_mapping_root;

	/*
	 * Device detail root mapping dev_id -> device_details
	 */
	__le64 device_details_root;

	__le32 data_block_size;		/* In 512-byte sectors. */

	__le32 metadata_block_size;	/* In 512-byte sectors. */
	__le64 metadata_nr_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;
} __packed;

struct disk_device_details {
	__le64 mapped_blocks;
	__le64 transaction_id;		/* When created. */
	__le32 creation_time;
	__le32 snapshotted_time;
} __packed;

struct dm_pool_metadata {
	struct hlist_node hash;

	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_space_map *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_transaction_manager *nb_tm;

	/*
	 * Two-level btree.
	 * First level holds thin_dev_t.
	 * Second level holds mappings.
	 */
	struct dm_btree_info info;

	/*
	 * Non-blocking version of the above.
	 */
	struct dm_btree_info nb_info;

	/*
	 * Just the top level for deleting whole devices.
	 */
	struct dm_btree_info tl_info;

	/*
	 * Just the bottom level for creating new devices.
	 */
	struct dm_btree_info bl_info;

	/*
	 * Describes the device details btree.
	 */
	struct dm_btree_info details_info;

	struct rw_semaphore root_lock;
	uint32_t time;
	int need_commit;
	dm_block_t root;
	dm_block_t details_root;
	struct list_head thin_devices;
	uint64_t trans_id;
	unsigned long flags;
	sector_t data_block_size;
};

struct dm_thin_device {
	struct list_head list;
	struct dm_pool_metadata *pmd;
	dm_thin_id id;

	int open_count;
	int changed;
	uint64_t mapped_blocks;
	uint64_t transaction_id;
	uint32_t creation_time;
	uint32_t snapshotted_time;
};

/*----------------------------------------------------------------
 * superblock validator
 *--------------------------------------------------------------*/

#define SUPERBLOCK_CSUM_XOR 160774

static void sb_prepare_for_write(struct dm_block_validator *v,
				 struct dm_block *b,
				 size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);

	disk_super->blocknr = cpu_to_le64(dm_block_location(b));
	disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
						      block_size - sizeof(__le32),
						      SUPERBLOCK_CSUM_XOR));
}

static int sb_check(struct dm_block_validator *v,
		    struct dm_block *b,
		    size_t block_size)
{
	struct thin_disk_superblock *disk_super = dm_block_data(b);
	__le32 csum_le;

	if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
		DMERR("sb_check failed: blocknr %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->blocknr),
		      (unsigned long long)dm_block_location(b));
		return -ENOTBLK;
	}

	if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
		DMERR("sb_check failed: magic %llu: "
		      "wanted %llu", le64_to_cpu(disk_super->magic),
		      (unsigned long long)THIN_SUPERBLOCK_MAGIC);
		return -EILSEQ;
	}

	csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
					     block_size - sizeof(__le32),
					     SUPERBLOCK_CSUM_XOR));
	if (csum_le != disk_super->csum) {
		DMERR("sb_check failed: csum %u: wanted %u",
		      le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
		return -EILSEQ;
	}

	return 0;
}

static struct dm_block_validator sb_validator = {
	.name = "superblock",
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
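
/*
 * The block manager runs these hooks for us: .check is called when the
 * superblock is read in from disk, and .prepare_for_write just before a
 * dirty copy is written back, so the checksum and blocknr fields are
 * maintained without callers having to think about them.
 */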

/*----------------------------------------------------------------
 * Methods for the btree value types
 *--------------------------------------------------------------*/

static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
	return (b << 24) | t;
}

static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
{
	*b = v >> 24;
	*t = v & ((1 << 24) - 1);
}
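
/*
 * A worked example of the packing above: block 0x123 at time 5 packs to
 * (0x123 << 24) | 5 == 0x123000005.  Unpacking shifts the block back down
 * and masks off the low 24 bits to recover the time, so the round trip is
 * lossless for blocks up to 40 bits and times up to 24 bits.
 */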

static void data_block_inc(void *context, void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_inc_block(sm, b);
}

static void data_block_dec(void *context, void *value_le)
{
	struct dm_space_map *sm = context;
	__le64 v_le;
	uint64_t b;
	uint32_t t;

	memcpy(&v_le, value_le, sizeof(v_le));
	unpack_block_time(le64_to_cpu(v_le), &b, &t);
	dm_sm_dec_block(sm, b);
}

static int data_block_equal(void *context, void *value1_le, void *value2_le)
{
	__le64 v1_le, v2_le;
	uint64_t b1, b2;
	uint32_t t;

	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));
	unpack_block_time(le64_to_cpu(v1_le), &b1, &t);
	unpack_block_time(le64_to_cpu(v2_le), &b2, &t);

	return b1 == b2;
}

static void subtree_inc(void *context, void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	dm_tm_inc(info->tm, root);
}

static void subtree_dec(void *context, void *value)
{
	struct dm_btree_info *info = context;
	__le64 root_le;
	uint64_t root;

	memcpy(&root_le, value, sizeof(root_le));
	root = le64_to_cpu(root_le);
	if (dm_btree_del(info, root))
		DMERR("btree delete failed");
}

static int subtree_equal(void *context, void *value1_le, void *value2_le)
{
	__le64 v1_le, v2_le;
	memcpy(&v1_le, value1_le, sizeof(v1_le));
	memcpy(&v2_le, value2_le, sizeof(v2_le));

	return v1_le == v2_le;
}

/*----------------------------------------------------------------*/
346
347 static int superblock_all_zeroes(struct dm_block_manager *bm, int *result)
348 {
349 int r;
350 unsigned i;
351 struct dm_block *b;
352 __le64 *data_le, zero = cpu_to_le64(0);
353 unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
354
355 /*
356 * We can't use a validator here - it may be all zeroes.
357 */
358 r = dm_bm_read_lock(bm, THIN_SUPERBLOCK_LOCATION, NULL, &b);
359 if (r)
360 return r;
361
362 data_le = dm_block_data(b);
363 *result = 1;
364 for (i = 0; i < block_size; i++) {
365 if (data_le[i] != zero) {
366 *result = 0;
367 break;
368 }
369 }
370
371 return dm_bm_unlock(b);
372 }

static int init_pmd(struct dm_pool_metadata *pmd,
		    struct dm_block_manager *bm,
		    dm_block_t nr_blocks, int create)
{
	int r;
	struct dm_space_map *sm, *data_sm;
	struct dm_transaction_manager *tm;
	struct dm_block *sblock;

	if (create) {
		r = dm_tm_create_with_sm(bm, THIN_SUPERBLOCK_LOCATION,
					 &sb_validator, &tm, &sm, &sblock);
		if (r < 0) {
			DMERR("tm_create_with_sm failed");
			return r;
		}

		data_sm = dm_sm_disk_create(tm, nr_blocks);
		if (IS_ERR(data_sm)) {
			DMERR("sm_disk_create failed");
			dm_tm_unlock(tm, sblock);
			r = PTR_ERR(data_sm);
			goto bad;
		}
	} else {
		struct thin_disk_superblock *disk_super = NULL;
		size_t space_map_root_offset =
			offsetof(struct thin_disk_superblock, metadata_space_map_root);

		r = dm_tm_open_with_sm(bm, THIN_SUPERBLOCK_LOCATION,
				       &sb_validator, space_map_root_offset,
				       SPACE_MAP_ROOT_SIZE, &tm, &sm, &sblock);
		if (r < 0) {
			DMERR("tm_open_with_sm failed");
			return r;
		}

		disk_super = dm_block_data(sblock);
		data_sm = dm_sm_disk_open(tm, disk_super->data_space_map_root,
					  sizeof(disk_super->data_space_map_root));
		if (IS_ERR(data_sm)) {
			DMERR("sm_disk_open failed");
			r = PTR_ERR(data_sm);
			goto bad;
		}
	}

	r = dm_tm_unlock(tm, sblock);
	if (r < 0) {
		DMERR("couldn't unlock superblock");
		goto bad_data_sm;
	}

	pmd->bm = bm;
	pmd->metadata_sm = sm;
	pmd->data_sm = data_sm;
	pmd->tm = tm;
	pmd->nb_tm = dm_tm_create_non_blocking_clone(tm);
	if (!pmd->nb_tm) {
		DMERR("could not create clone tm");
		r = -ENOMEM;
		goto bad_data_sm;
	}

	pmd->info.tm = tm;
	pmd->info.levels = 2;
	pmd->info.value_type.context = pmd->data_sm;
	pmd->info.value_type.size = sizeof(__le64);
	pmd->info.value_type.inc = data_block_inc;
	pmd->info.value_type.dec = data_block_dec;
	pmd->info.value_type.equal = data_block_equal;

	memcpy(&pmd->nb_info, &pmd->info, sizeof(pmd->nb_info));
	pmd->nb_info.tm = pmd->nb_tm;

	pmd->tl_info.tm = tm;
	pmd->tl_info.levels = 1;
	pmd->tl_info.value_type.context = &pmd->info;
	pmd->tl_info.value_type.size = sizeof(__le64);
	pmd->tl_info.value_type.inc = subtree_inc;
	pmd->tl_info.value_type.dec = subtree_dec;
	pmd->tl_info.value_type.equal = subtree_equal;

	pmd->bl_info.tm = tm;
	pmd->bl_info.levels = 1;
	pmd->bl_info.value_type.context = pmd->data_sm;
	pmd->bl_info.value_type.size = sizeof(__le64);
	pmd->bl_info.value_type.inc = data_block_inc;
	pmd->bl_info.value_type.dec = data_block_dec;
	pmd->bl_info.value_type.equal = data_block_equal;

	pmd->details_info.tm = tm;
	pmd->details_info.levels = 1;
	pmd->details_info.value_type.context = NULL;
	pmd->details_info.value_type.size = sizeof(struct disk_device_details);
	pmd->details_info.value_type.inc = NULL;
	pmd->details_info.value_type.dec = NULL;
	pmd->details_info.value_type.equal = NULL;

	pmd->root = 0;

	init_rwsem(&pmd->root_lock);
	pmd->time = 0;
	pmd->need_commit = 0;
	pmd->details_root = 0;
	pmd->trans_id = 0;
	pmd->flags = 0;
	INIT_LIST_HEAD(&pmd->thin_devices);

	return 0;

bad_data_sm:
	dm_sm_destroy(data_sm);
bad:
	dm_tm_destroy(tm);
	dm_sm_destroy(sm);

	return r;
}
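
/*
 * Note the five dm_btree_info variants set up above: info and nb_info view
 * the whole mapping tree as a 2-level btree (blocking and non-blocking
 * respectively), tl_info and bl_info expose just its top and bottom levels
 * so whole devices can be deleted or created, and details_info describes
 * the separate device details tree.
 */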

static int __begin_transaction(struct dm_pool_metadata *pmd)
{
	int r;
	u32 features;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * __commit_transaction() resets this.
	 */
	WARN_ON(pmd->need_commit);

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	pmd->time = le32_to_cpu(disk_super->time);
	pmd->root = le64_to_cpu(disk_super->data_mapping_root);
	pmd->details_root = le64_to_cpu(disk_super->device_details_root);
	pmd->trans_id = le64_to_cpu(disk_super->trans_id);
	pmd->flags = le32_to_cpu(disk_super->flags);
	pmd->data_block_size = le32_to_cpu(disk_super->data_block_size);

	features = le32_to_cpu(disk_super->incompat_flags) & ~THIN_FEATURE_INCOMPAT_SUPP;
	if (features) {
		DMERR("could not access metadata due to "
		      "unsupported optional features (%lx).",
		      (unsigned long)features);
		r = -EINVAL;
		goto out;
	}

	/*
	 * Check for read-only metadata to skip the following RDWR checks.
	 */
	if (get_disk_ro(pmd->bdev->bd_disk))
		goto out;

	features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP;
	if (features) {
		DMERR("could not access metadata RDWR due to "
		      "unsupported optional features (%lx).",
		      (unsigned long)features);
		r = -EINVAL;
	}

out:
	dm_bm_unlock(sblock);
	return r;
}

static int __write_changed_details(struct dm_pool_metadata *pmd)
{
	int r;
	struct dm_thin_device *td, *tmp;
	struct disk_device_details details;
	uint64_t key;

	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (!td->changed)
			continue;

		key = td->id;

		details.mapped_blocks = cpu_to_le64(td->mapped_blocks);
		details.transaction_id = cpu_to_le64(td->transaction_id);
		details.creation_time = cpu_to_le32(td->creation_time);
		details.snapshotted_time = cpu_to_le32(td->snapshotted_time);
		__dm_bless_for_disk(&details);

		r = dm_btree_insert(&pmd->details_info, pmd->details_root,
				    &key, &details, &pmd->details_root);
		if (r)
			return r;

		if (td->open_count)
			td->changed = 0;
		else {
			list_del(&td->list);
			kfree(td);
		}

		pmd->need_commit = 1;
	}

	return 0;
}

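/*
 * Commit ordering matters here: the space maps are committed and the
 * transaction manager pre-committed (flushing all shadowed blocks) before
 * the superblock itself is rewritten, so an interrupted commit leaves the
 * old, consistent transaction in place.
 */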
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
	/*
	 * FIXME: Associated pool should be made read-only on failure.
	 */
	int r;
	size_t metadata_len, data_len;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the thin_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct thin_disk_superblock) > 512);

	r = __write_changed_details(pmd);
	if (r < 0)
		return r;

	if (!pmd->need_commit)
		return r;

	r = dm_sm_commit(pmd->data_sm);
	if (r < 0)
		return r;

	r = dm_tm_pre_commit(pmd->tm);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
	if (r < 0)
		return r;

	r = dm_sm_root_size(pmd->data_sm, &data_len);
	if (r < 0)
		return r;

	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			     &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	disk_super->time = cpu_to_le32(pmd->time);
	disk_super->data_mapping_root = cpu_to_le64(pmd->root);
	disk_super->device_details_root = cpu_to_le64(pmd->details_root);
	disk_super->trans_id = cpu_to_le64(pmd->trans_id);
	disk_super->flags = cpu_to_le32(pmd->flags);

	r = dm_sm_copy_root(pmd->metadata_sm, &disk_super->metadata_space_map_root,
			    metadata_len);
	if (r < 0)
		goto out_locked;

	r = dm_sm_copy_root(pmd->data_sm, &disk_super->data_space_map_root,
			    data_len);
	if (r < 0)
		goto out_locked;

	r = dm_tm_commit(pmd->tm, sblock);
	if (!r)
		pmd->need_commit = 0;

	return r;

out_locked:
	dm_bm_unlock(sblock);
	return r;
}

struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
					       sector_t data_block_size)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_pool_metadata *pmd;
	sector_t bdev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
	struct dm_block_manager *bm;
	int create;
	struct dm_block *sblock;

	pmd = kmalloc(sizeof(*pmd), GFP_KERNEL);
	if (!pmd) {
		DMERR("could not allocate metadata struct");
		return ERR_PTR(-ENOMEM);
	}

	bm = dm_block_manager_create(bdev, THIN_METADATA_BLOCK_SIZE,
				     THIN_METADATA_CACHE_SIZE,
				     THIN_MAX_CONCURRENT_LOCKS);
	if (!bm) {
		DMERR("could not create block manager");
		kfree(pmd);
		return ERR_PTR(-ENOMEM);
	}

	r = superblock_all_zeroes(bm, &create);
	if (r) {
		dm_block_manager_destroy(bm);
		kfree(pmd);
		return ERR_PTR(r);
	}

	r = init_pmd(pmd, bm, 0, create);
	if (r) {
		dm_block_manager_destroy(bm);
		kfree(pmd);
		return ERR_PTR(r);
	}
	pmd->bdev = bdev;

	if (!create) {
		r = __begin_transaction(pmd);
		if (r < 0)
			goto bad;
		return pmd;
	}

	/*
	 * Create.
	 */
	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			     &sb_validator, &sblock);
	if (r)
		goto bad;

	if (bdev_size > THIN_METADATA_MAX_SECTORS)
		bdev_size = THIN_METADATA_MAX_SECTORS;

	disk_super = dm_block_data(sblock);
	disk_super->magic = cpu_to_le64(THIN_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(THIN_VERSION);
	disk_super->time = 0;
	disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
	disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
	disk_super->data_block_size = cpu_to_le32(data_block_size);

	r = dm_bm_unlock(sblock);
	if (r < 0)
		goto bad;

	r = dm_btree_empty(&pmd->info, &pmd->root);
	if (r < 0)
		goto bad;

	r = dm_btree_empty(&pmd->details_info, &pmd->details_root);
	if (r < 0) {
		DMERR("couldn't create devices root");
		goto bad;
	}

	pmd->flags = 0;
	pmd->need_commit = 1;
	r = dm_pool_commit_metadata(pmd);
	if (r < 0) {
		DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
		      __func__, r);
		goto bad;
	}

	return pmd;

bad:
	if (dm_pool_metadata_close(pmd) < 0)
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
	return ERR_PTR(r);
}

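/*
 * A sketch of the expected lifecycle (see dm-thin.c for the real caller):
 * the pool target opens the metadata in its constructor, commits at the
 * end of each batch of updates, and closes it again in its destructor.
 *
 *	pmd = dm_pool_metadata_open(metadata_dev, data_block_size);
 *	...
 *	dm_pool_commit_metadata(pmd);
 *	...
 *	dm_pool_metadata_close(pmd);
 */
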
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
	int r;
	unsigned open_devices = 0;
	struct dm_thin_device *td, *tmp;

	down_read(&pmd->root_lock);
	list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
		if (td->open_count)
			open_devices++;
		else {
			list_del(&td->list);
			kfree(td);
		}
	}
	up_read(&pmd->root_lock);

	if (open_devices) {
		DMERR("attempt to close pmd when %u device(s) are still open",
		      open_devices);
		return -EBUSY;
	}

	r = __commit_transaction(pmd);
	if (r < 0)
		DMWARN("%s: __commit_transaction() failed, error = %d",
		       __func__, r);

	dm_tm_destroy(pmd->tm);
	dm_tm_destroy(pmd->nb_tm);
	dm_block_manager_destroy(pmd->bm);
	dm_sm_destroy(pmd->metadata_sm);
	dm_sm_destroy(pmd->data_sm);
	kfree(pmd);

	return 0;
}

/*
 * __open_device: Returns @td corresponding to device with id @dev,
 * creating it if @create is set and incrementing @td->open_count.
 * On failure, @td is undefined.
 */
static int __open_device(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, int create,
			 struct dm_thin_device **td)
{
	int r, changed = 0;
	struct dm_thin_device *td2;
	uint64_t key = dev;
	struct disk_device_details details_le;

	/*
	 * If the device is already open, return it.
	 */
	list_for_each_entry(td2, &pmd->thin_devices, list)
		if (td2->id == dev) {
			/*
			 * May not create an already-open device.
			 */
			if (create)
				return -EEXIST;

			td2->open_count++;
			*td = td2;
			return 0;
		}

	/*
	 * Check the device exists.
	 */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (r) {
		if (r != -ENODATA || !create)
			return r;

		/*
		 * Create new device.
		 */
		changed = 1;
		details_le.mapped_blocks = 0;
		details_le.transaction_id = cpu_to_le64(pmd->trans_id);
		details_le.creation_time = cpu_to_le32(pmd->time);
		details_le.snapshotted_time = cpu_to_le32(pmd->time);
	}

	*td = kmalloc(sizeof(**td), GFP_NOIO);
	if (!*td)
		return -ENOMEM;

	(*td)->pmd = pmd;
	(*td)->id = dev;
	(*td)->open_count = 1;
	(*td)->changed = changed;
	(*td)->mapped_blocks = le64_to_cpu(details_le.mapped_blocks);
	(*td)->transaction_id = le64_to_cpu(details_le.transaction_id);
	(*td)->creation_time = le32_to_cpu(details_le.creation_time);
	(*td)->snapshotted_time = le32_to_cpu(details_le.snapshotted_time);

	list_add(&(*td)->list, &pmd->thin_devices);

	return 0;
}

static void __close_device(struct dm_thin_device *td)
{
	--td->open_count;
}

static int __create_thin(struct dm_pool_metadata *pmd,
			 dm_thin_id dev)
{
	int r;
	dm_block_t dev_root;
	uint64_t key = dev;
	struct disk_device_details details_le;
	struct dm_thin_device *td;
	__le64 value;

	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &key, &details_le);
	if (!r)
		return -EEXIST;

	/*
	 * Create an empty btree for the mappings.
	 */
	r = dm_btree_empty(&pmd->bl_info, &dev_root);
	if (r)
		return r;

	/*
	 * Insert it into the main mapping tree.
	 */
	value = cpu_to_le64(dev_root);
	__dm_bless_for_disk(&value);
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}

	r = __open_device(pmd, dev, 1, &td);
	if (r) {
		dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
		dm_btree_del(&pmd->bl_info, dev_root);
		return r;
	}
	__close_device(td);

	return r;
}

int dm_pool_create_thin(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;

	down_write(&pmd->root_lock);
	r = __create_thin(pmd, dev);
	up_write(&pmd->root_lock);

	return r;
}

static int __set_snapshot_details(struct dm_pool_metadata *pmd,
				  struct dm_thin_device *snap,
				  dm_thin_id origin, uint32_t time)
{
	int r;
	struct dm_thin_device *td;

	r = __open_device(pmd, origin, 0, &td);
	if (r)
		return r;

	td->changed = 1;
	td->snapshotted_time = time;

	snap->mapped_blocks = td->mapped_blocks;
	snap->snapshotted_time = time;
	__close_device(td);

	return 0;
}

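/*
 * Snapshots share the origin's whole mapping tree: __create_snap() below
 * simply bumps the reference count on the origin's subtree root, and
 * per-block sharing is later detected via __snapshotted_since().
 */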
static int __create_snap(struct dm_pool_metadata *pmd,
			 dm_thin_id dev, dm_thin_id origin)
{
	int r;
	dm_block_t origin_root;
	uint64_t key = origin, dev_key = dev;
	struct dm_thin_device *td;
	struct disk_device_details details_le;
	__le64 value;

	/* check this device is unused */
	r = dm_btree_lookup(&pmd->details_info, pmd->details_root,
			    &dev_key, &details_le);
	if (!r)
		return -EEXIST;

	/* find the mapping tree for the origin */
	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &key, &value);
	if (r)
		return r;
	origin_root = le64_to_cpu(value);

	/* clone the origin, an inc will do */
	dm_tm_inc(pmd->tm, origin_root);

	/* insert into the main mapping tree */
	value = cpu_to_le64(origin_root);
	__dm_bless_for_disk(&value);
	key = dev;
	r = dm_btree_insert(&pmd->tl_info, pmd->root, &key, &value, &pmd->root);
	if (r) {
		dm_tm_dec(pmd->tm, origin_root);
		return r;
	}

	pmd->time++;

	r = __open_device(pmd, dev, 1, &td);
	if (r)
		goto bad;

	r = __set_snapshot_details(pmd, td, origin, pmd->time);
	__close_device(td);

	if (r)
		goto bad;

	return 0;

bad:
	dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	dm_btree_remove(&pmd->details_info, pmd->details_root,
			&key, &pmd->details_root);
	return r;
}

int dm_pool_create_snap(struct dm_pool_metadata *pmd,
			dm_thin_id dev,
			dm_thin_id origin)
{
	int r;

	down_write(&pmd->root_lock);
	r = __create_snap(pmd, dev, origin);
	up_write(&pmd->root_lock);

	return r;
}

static int __delete_device(struct dm_pool_metadata *pmd, dm_thin_id dev)
{
	int r;
	uint64_t key = dev;
	struct dm_thin_device *td;

	/* TODO: failure should mark the transaction invalid */
	r = __open_device(pmd, dev, 0, &td);
	if (r)
		return r;

	if (td->open_count > 1) {
		__close_device(td);
		return -EBUSY;
	}

	list_del(&td->list);
	kfree(td);
	r = dm_btree_remove(&pmd->details_info, pmd->details_root,
			    &key, &pmd->details_root);
	if (r)
		return r;

	r = dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
	if (r)
		return r;

	pmd->need_commit = 1;

	return 0;
}

int dm_pool_delete_thin_device(struct dm_pool_metadata *pmd,
			       dm_thin_id dev)
{
	int r;

	down_write(&pmd->root_lock);
	r = __delete_device(pmd, dev);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_set_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t current_id,
					uint64_t new_id)
{
	down_write(&pmd->root_lock);
	if (pmd->trans_id != current_id) {
		up_write(&pmd->root_lock);
		DMERR("mismatched transaction id");
		return -EINVAL;
	}

	pmd->trans_id = new_id;
	pmd->need_commit = 1;
	up_write(&pmd->root_lock);

	return 0;
}

int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
					uint64_t *result)
{
	down_read(&pmd->root_lock);
	*result = pmd->trans_id;
	up_read(&pmd->root_lock);

	return 0;
}

static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r, inc;
	struct thin_disk_superblock *disk_super;
	struct dm_block *copy, *sblock;
	dm_block_t held_root;

	/*
	 * Copy the superblock.
	 */
	dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
	r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
			       &sb_validator, &copy, &inc);
	if (r)
		return r;

	BUG_ON(!inc);

	held_root = dm_block_location(copy);
	disk_super = dm_block_data(copy);

	if (le64_to_cpu(disk_super->held_root)) {
		DMWARN("Pool metadata snapshot already exists: release this before taking another.");

		dm_tm_dec(pmd->tm, held_root);
		dm_tm_unlock(pmd->tm, copy);
		pmd->need_commit = 1;

		return -EBUSY;
	}

	/*
	 * Wipe the space maps since we're not publishing this copy as a
	 * live superblock.
	 */
	memset(&disk_super->data_space_map_root, 0,
	       sizeof(disk_super->data_space_map_root));
	memset(&disk_super->metadata_space_map_root, 0,
	       sizeof(disk_super->metadata_space_map_root));

	/*
	 * Increment the data structures that need to be preserved.
	 */
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
	dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
	dm_tm_unlock(pmd->tm, copy);

	/*
	 * Write the held root into the superblock.
	 */
	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			     &sb_validator, &sblock);
	if (r) {
		dm_tm_dec(pmd->tm, held_root);
		pmd->need_commit = 1;
		return r;
	}

	disk_super = dm_block_data(sblock);
	disk_super->held_root = cpu_to_le64(held_root);
	dm_bm_unlock(sblock);

	pmd->need_commit = 1;

	return 0;
}

int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;

	down_write(&pmd->root_lock);
	r = __reserve_metadata_snap(pmd);
	up_write(&pmd->root_lock);

	return r;
}

static int __release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock, *copy;
	dm_block_t held_root;

	r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			     &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	held_root = le64_to_cpu(disk_super->held_root);
	disk_super->held_root = cpu_to_le64(0);
	pmd->need_commit = 1;

	dm_bm_unlock(sblock);

	if (!held_root) {
		DMWARN("No pool metadata snapshot found: nothing to release.");
		return -EINVAL;
	}

	r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
	if (r)
		return r;

	disk_super = dm_block_data(copy);
	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
	dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
	dm_sm_dec_block(pmd->metadata_sm, held_root);

	return dm_tm_unlock(pmd->tm, copy);
}

int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
{
	int r;

	down_write(&pmd->root_lock);
	r = __release_metadata_snap(pmd);
	up_write(&pmd->root_lock);

	return r;
}

static int __get_metadata_snap(struct dm_pool_metadata *pmd,
			       dm_block_t *result)
{
	int r;
	struct thin_disk_superblock *disk_super;
	struct dm_block *sblock;

	r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
			    &sb_validator, &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(sblock);
	*result = le64_to_cpu(disk_super->held_root);

	return dm_bm_unlock(sblock);
}

int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
			      dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = __get_metadata_snap(pmd, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_open_thin_device(struct dm_pool_metadata *pmd, dm_thin_id dev,
			     struct dm_thin_device **td)
{
	int r;

	down_write(&pmd->root_lock);
	r = __open_device(pmd, dev, 0, td);
	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_close_thin_device(struct dm_thin_device *td)
{
	down_write(&td->pmd->root_lock);
	__close_device(td);
	up_write(&td->pmd->root_lock);

	return 0;
}

dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
{
	return td->id;
}

static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
	return td->snapshotted_time > time;
}

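/*
 * dm_thin_find_block() may be called from contexts that cannot sleep; such
 * callers pass can_block == 0.  The lookup then uses the non-blocking btree
 * view and a trylock, and a contended root lock fails fast with
 * -EWOULDBLOCK so the caller can retry from a context that may block.
 */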
int dm_thin_find_block(struct dm_thin_device *td, dm_block_t block,
		       int can_block, struct dm_thin_lookup_result *result)
{
	int r;
	uint64_t block_time = 0;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	if (can_block) {
		down_read(&pmd->root_lock);
		r = dm_btree_lookup(&pmd->info, pmd->root, keys, &value);
		if (!r)
			block_time = le64_to_cpu(value);
		up_read(&pmd->root_lock);

	} else if (down_read_trylock(&pmd->root_lock)) {
		r = dm_btree_lookup(&pmd->nb_info, pmd->root, keys, &value);
		if (!r)
			block_time = le64_to_cpu(value);
		up_read(&pmd->root_lock);

	} else
		return -EWOULDBLOCK;

	if (!r) {
		dm_block_t exception_block;
		uint32_t exception_time;
		unpack_block_time(block_time, &exception_block,
				  &exception_time);
		result->block = exception_block;
		result->shared = __snapshotted_since(td, exception_time);
	}

	return r;
}

static int __insert(struct dm_thin_device *td, dm_block_t block,
		    dm_block_t data_block)
{
	int r, inserted;
	__le64 value;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	pmd->need_commit = 1;
	value = cpu_to_le64(pack_block_time(data_block, pmd->time));
	__dm_bless_for_disk(&value);

	r = dm_btree_insert_notify(&pmd->info, pmd->root, keys, &value,
				   &pmd->root, &inserted);
	if (r)
		return r;

	if (inserted) {
		td->mapped_blocks++;
		td->changed = 1;
	}

	return 0;
}

int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
			 dm_block_t data_block)
{
	int r;

	down_write(&td->pmd->root_lock);
	r = __insert(td, block, data_block);
	up_write(&td->pmd->root_lock);

	return r;
}

static int __remove(struct dm_thin_device *td, dm_block_t block)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;
	dm_block_t keys[2] = { td->id, block };

	r = dm_btree_remove(&pmd->info, pmd->root, keys, &pmd->root);
	if (r)
		return r;

	td->mapped_blocks--;
	td->changed = 1;
	pmd->need_commit = 1;

	return 0;
}

int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
{
	int r;

	down_write(&td->pmd->root_lock);
	r = __remove(td, block);
	up_write(&td->pmd->root_lock);

	return r;
}

int dm_pool_alloc_data_block(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r;

	down_write(&pmd->root_lock);

	r = dm_sm_new_block(pmd->data_sm, result);
	pmd->need_commit = 1;

	up_write(&pmd->root_lock);

	return r;
}

int dm_pool_commit_metadata(struct dm_pool_metadata *pmd)
{
	int r;

	down_write(&pmd->root_lock);

	r = __commit_transaction(pmd);
	if (r <= 0)
		goto out;

	/*
	 * Open the next transaction.
	 */
	r = __begin_transaction(pmd);
out:
	up_write(&pmd->root_lock);
	return r;
}

int dm_pool_get_free_block_count(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_free(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
					  dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_free(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
				  dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_blocks(pmd->metadata_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result)
{
	down_read(&pmd->root_lock);
	*result = pmd->data_block_size;
	up_read(&pmd->root_lock);

	return 0;
}

int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result)
{
	int r;

	down_read(&pmd->root_lock);
	r = dm_sm_get_nr_blocks(pmd->data_sm, result);
	up_read(&pmd->root_lock);

	return r;
}

int dm_thin_get_mapped_count(struct dm_thin_device *td, dm_block_t *result)
{
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	*result = td->mapped_blocks;
	up_read(&pmd->root_lock);

	return 0;
}

static int __highest_block(struct dm_thin_device *td, dm_block_t *result)
{
	int r;
	__le64 value_le;
	dm_block_t thin_root;
	struct dm_pool_metadata *pmd = td->pmd;

	r = dm_btree_lookup(&pmd->tl_info, pmd->root, &td->id, &value_le);
	if (r)
		return r;

	thin_root = le64_to_cpu(value_le);

	return dm_btree_find_highest_key(&pmd->bl_info, thin_root, result);
}

int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
				     dm_block_t *result)
{
	int r;
	struct dm_pool_metadata *pmd = td->pmd;

	down_read(&pmd->root_lock);
	r = __highest_block(td, result);
	up_read(&pmd->root_lock);

	return r;
}

static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r;
	dm_block_t old_count;

	r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count);
	if (r)
		return r;

	if (new_count == old_count)
		return 0;

	if (new_count < old_count) {
		DMERR("cannot reduce size of data device");
		return -EINVAL;
	}

	r = dm_sm_extend(pmd->data_sm, new_count - old_count);
	if (!r)
		pmd->need_commit = 1;

	return r;
}

int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count)
{
	int r;

	down_write(&pmd->root_lock);
	r = __resize_data_dev(pmd, new_count);
	up_write(&pmd->root_lock);

	return r;
}