/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
struct map_lookup {
	u64 type;
	int io_align;
	int io_width;
	int stripe_len;
	int sector_size;
	int num_stripes;
	int sub_stripes;
	struct btrfs_bio_stripe stripes[];
};
#define map_lookup_size(n) (sizeof(struct map_lookup) + \
			    (sizeof(struct btrfs_bio_stripe) * (n)))
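
/*
 * map_lookup ends in a flexible array member, so a mapping that covers n
 * stripes is sized with map_lookup_size(n) and allocated in one shot,
 * e.g. kmalloc(map_lookup_size(num_stripes), GFP_NOFS), which is how
 * btrfs_alloc_chunk() and read_one_chunk() below size their allocations.
 */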
static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
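
/*
 * every scanned filesystem gets one btrfs_fs_devices entry on fs_uuids,
 * keyed by fsid; uuid_mutex guards the list and the per-fs device lists
 * during scan, open and close.
 */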
int btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;
	struct list_head *uuid_cur;
	struct list_head *devices_cur;
	struct btrfs_device *dev;

	list_for_each(uuid_cur, &fs_uuids) {
		fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
					list);
		while(!list_empty(&fs_devices->devices)) {
			devices_cur = fs_devices->devices.next;
			dev = list_entry(devices_cur, struct btrfs_device,
					 dev_list);
			if (dev->bdev) {
				close_bdev_excl(dev->bdev);
			}
			list_del(&dev->dev_list);
			kfree(dev->name);
			kfree(dev);
		}
	}
	return 0;
}
static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
					  u8 *uuid)
{
	struct btrfs_device *dev;
	struct list_head *cur;

	list_for_each(cur, head) {
		dev = list_entry(cur, struct btrfs_device, dev_list);
		if (dev->devid == devid &&
		    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
			return dev;
		}
	}
	return NULL;
}
static struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
	struct list_head *cur;
	struct btrfs_fs_devices *fs_devices;

	list_for_each(cur, &fs_uuids) {
		fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
		if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}
	return NULL;
}
static int device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices;
	u64 found_transid = btrfs_super_generation(disk_super);

	fs_devices = find_fsid(disk_super->fsid);
	if (!fs_devices) {
		fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
		if (!fs_devices)
			return -ENOMEM;
		INIT_LIST_HEAD(&fs_devices->devices);
		INIT_LIST_HEAD(&fs_devices->alloc_list);
		list_add(&fs_devices->list, &fs_uuids);
		memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
		fs_devices->lowest_devid = (u64)-1;
		fs_devices->num_devices = 0;
		device = NULL;
	} else {
		device = __find_device(&fs_devices->devices, devid,
				       disk_super->dev_item.uuid);
	}
	if (!device) {
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device) {
			/* we can safely leave the fs_devices entry around */
			return -ENOMEM;
		}
		device->devid = devid;
		memcpy(device->uuid, disk_super->dev_item.uuid,
		       BTRFS_UUID_SIZE);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
		device->name = kstrdup(path, GFP_NOFS);
		if (!device->name) {
			kfree(device);
			return -ENOMEM;
		}
		list_add(&device->dev_list, &fs_devices->devices);
		list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
		fs_devices->num_devices++;
	}

	if (found_transid > fs_devices->latest_trans) {
		fs_devices->latest_devid = devid;
		fs_devices->latest_trans = found_transid;
	}
	if (fs_devices->lowest_devid > devid) {
		fs_devices->lowest_devid = devid;
	}
	*fs_devices_ret = fs_devices;
	return 0;
}
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev) {
			close_bdev_excl(device->bdev);
		}
		device->bdev = NULL;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
}
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       int flags, void *holder)
{
	struct block_device *bdev;
	struct list_head *head = &fs_devices->devices;
	struct list_head *cur;
	struct btrfs_device *device;
	int ret;

	mutex_lock(&uuid_mutex);
	list_for_each(cur, head) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdev = open_bdev_excl(device->name, flags, holder);

		if (IS_ERR(bdev)) {
			printk("open %s failed\n", device->name);
			ret = PTR_ERR(bdev);
			goto fail;
		}
		if (device->devid == fs_devices->latest_devid)
			fs_devices->latest_bdev = bdev;
		if (device->devid == fs_devices->lowest_devid) {
			fs_devices->lowest_bdev = bdev;
		}
		device->bdev = bdev;
	}
	mutex_unlock(&uuid_mutex);
	return 0;
fail:
	mutex_unlock(&uuid_mutex);
	btrfs_close_devices(fs_devices);
	return ret;
}
int btrfs_scan_one_device(const char *path, int flags, void *holder,
			  struct btrfs_fs_devices **fs_devices_ret)
{
	struct btrfs_super_block *disk_super;
	struct block_device *bdev;
	struct buffer_head *bh;
	int ret;
	u64 devid;
	u64 transid;

	mutex_lock(&uuid_mutex);

	bdev = open_bdev_excl(path, flags, holder);

	if (IS_ERR(bdev)) {
		ret = PTR_ERR(bdev);
		goto error;
	}

	ret = set_blocksize(bdev, 4096);
	if (ret)
		goto error_close;
	bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
	if (!bh) {
		ret = -EIO;
		goto error_close;
	}
	disk_super = (struct btrfs_super_block *)bh->b_data;
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
	    sizeof(disk_super->magic))) {
		ret = -EINVAL;
		goto error_brelse;
	}
	devid = le64_to_cpu(disk_super->dev_item.devid);
	transid = btrfs_super_generation(disk_super);
	if (disk_super->label[0])
		printk("device label %s ", disk_super->label);
	else {
		/* FIXME, make a real uuid parser */
		printk("device fsid %llx-%llx ",
		       *(unsigned long long *)disk_super->fsid,
		       *(unsigned long long *)(disk_super->fsid + 8));
	}
	printk("devid %Lu transid %Lu %s\n", devid, transid, path);
	ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
	brelse(bh);
error_close:
	close_bdev_excl(bdev);
error:
	mutex_unlock(&uuid_mutex);
	return ret;
}
/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static int find_free_dev_extent(struct btrfs_trans_handle *trans,
				struct btrfs_device *device,
				struct btrfs_path *path,
				u64 num_bytes, u64 *start)
{
	struct btrfs_key key;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	u64 hole_size = 0;
	u64 last_byte = 0;
	u64 search_start = 0;
	u64 search_end = device->total_bytes;
	int ret;
	int slot = 0;
	int start_found;
	struct extent_buffer *l;

	start_found = 0;
	path->reada = 2;

	/* FIXME use last free of some kind */

	/* we don't want to overwrite the superblock on the drive,
	 * so we make sure to start at an offset of at least 1MB
	 */
	search_start = max((u64)1024 * 1024, search_start);

	if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
		search_start = max(root->fs_info->alloc_start, search_start);

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	ret = btrfs_previous_item(root, path, 0, key.type);
	if (ret < 0)
		goto error;
	l = path->nodes[0];
	btrfs_item_key_to_cpu(l, &key, path->slots[0]);
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
no_more_items:
			if (!start_found) {
				if (search_start >= search_end) {
					ret = -ENOSPC;
					goto error;
				}
				*start = search_start;
				start_found = 1;
				goto check_pending;
			}
			*start = last_byte > search_start ?
				last_byte : search_start;
			if (search_end <= *start) {
				ret = -ENOSPC;
				goto error;
			}
			goto check_pending;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			goto no_more_items;

		if (key.offset >= search_start && key.offset > last_byte &&
		    start_found) {
			if (last_byte < search_start)
				last_byte = search_start;
			hole_size = key.offset - last_byte;
			if (key.offset > last_byte &&
			    hole_size >= num_bytes) {
				*start = last_byte;
				goto check_pending;
			}
		}
		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
			goto next;
		}

		start_found = 1;
		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
		path->slots[0]++;
		cond_resched();
	}
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	btrfs_release_path(root, path);
	BUG_ON(*start < search_start);

	if (*start + num_bytes > search_end) {
		ret = -ENOSPC;
		goto error;
	}
	/* check for pending inserts here */
	return 0;

error:
	btrfs_release_path(root, path);
	return ret;
}
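
/*
 * Illustrative walk of the search above: with existing dev extents at
 * [1MB, 33MB) and [65MB, 97MB) and num_bytes = 16MB, the first item sets
 * last_byte to 33MB, the second item opens a 32MB hole, and *start comes
 * back as 33MB.
 */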
int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return ret;
}
int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
			   struct btrfs_device *device,
			   u64 chunk_tree, u64 chunk_objectid,
			   u64 chunk_offset,
			   u64 num_bytes, u64 *start)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_free_dev_extent(trans, device, path, num_bytes, start);
	if (ret)
		goto err;

	key.objectid = device->devid;
	key.offset = *start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	BUG_ON(ret);

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
		    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
		    BTRFS_UUID_SIZE);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
err:
	btrfs_free_path(path);
	return ret;
}
static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	struct btrfs_chunk *chunk;
	struct btrfs_key found_key;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
	if (ret) {
		*offset = 0;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != objectid)
			*offset = 0;
		else {
			chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
					       struct btrfs_chunk);
			*offset = found_key.offset +
				btrfs_chunk_length(path->nodes[0], chunk);
		}
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
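
/*
 * e.g. if the last chunk item for this objectid sits at key.offset 100MB
 * with a length of 256MB, *offset comes back as 356MB, the logical
 * address where the next chunk can be placed.
 */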
static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
			   u64 *objectid)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	BUG_ON(ret == 0);

	ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		*objectid = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*objectid = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_release_path(root, path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;
	u64 free_devid = 0;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = find_next_devid(root, path, &free_devid);
	if (ret)
		goto out;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = free_devid;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	device->devid = free_devid;
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct list_head *cur;
	struct list_head *devices;
	u64 total_bytes;
	int ret = 0;

	bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
	if (!bdev) {
		return -EIO;
	}
	mutex_lock(&root->fs_info->fs_mutex);
	trans = btrfs_start_transaction(root, 1);
	devices = &root->fs_info->fs_devices->devices;
	list_for_each(cur, devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		if (device->bdev == bdev) {
			ret = -EEXIST;
			goto out;
		}
	}

	device = kzalloc(sizeof(*device), GFP_NOFS);
	if (!device) {
		/* we can safely leave the fs_devices entry around */
		ret = -ENOMEM;
		goto out_close_bdev;
	}

	device->barriers = 1;
	generate_random_uuid(device->uuid);
	spin_lock_init(&device->io_lock);
	device->name = kstrdup(device_path, GFP_NOFS);
	if (!device->name) {
		kfree(device);
		goto out_close_bdev;
	}
	device->io_width = root->sectorsize;
	device->io_align = root->sectorsize;
	device->sector_size = root->sectorsize;
	device->total_bytes = i_size_read(bdev->bd_inode);
	device->dev_root = root->fs_info->dev_root;
	device->bdev = bdev;

	ret = btrfs_add_device(trans, root, device);
	if (ret)
		goto out_close_bdev;

	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
				    total_bytes + device->total_bytes);

	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
	btrfs_set_super_num_devices(&root->fs_info->super_copy,
				    total_bytes + 1);

	list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
	list_add(&device->dev_alloc_list,
		 &root->fs_info->fs_devices->alloc_list);
	root->fs_info->fs_devices->num_devices++;
out:
	btrfs_end_transaction(trans, root);
	mutex_unlock(&root->fs_info->fs_mutex);
	return ret;

out_close_bdev:
	close_bdev_excl(bdev);
	goto out;
}
int btrfs_update_device(struct btrfs_trans_handle *trans,
			struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	root = device->dev_root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
	btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_super_block *super_copy =
		&device->dev_root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = new_size - device->total_bytes;

	device->total_bytes = new_size;
	btrfs_set_super_total_bytes(super_copy, old_total + diff);
	return btrfs_update_device(trans, device);
}
static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    u64 chunk_tree, u64 chunk_objectid,
			    u64 chunk_offset)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	root = root->fs_info->chunk_root;
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = chunk_objectid;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	BUG_ON(ret);

	ret = btrfs_del_item(trans, root, path);
	BUG_ON(ret);

	btrfs_free_path(path);
	return 0;
}
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
			chunk_offset)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}
		if (key.objectid == chunk_objectid &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	return ret;
}
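
/*
 * sys_chunk_array is a packed sequence of (btrfs_disk_key, btrfs_chunk)
 * pairs, and every chunk entry is variable sized because of its trailing
 * stripe array; that is why the walk above must recompute len from the
 * stripe count before it can step to the next key.
 */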
int btrfs_relocate_chunk(struct btrfs_root *root,
			 u64 chunk_tree, u64 chunk_objectid,
			 u64 chunk_offset)
{
	struct extent_map_tree *em_tree;
	struct btrfs_root *extent_root;
	struct btrfs_trans_handle *trans;
	struct extent_map *em;
	struct map_lookup *map;
	int ret;
	int i;

	root = root->fs_info->chunk_root;
	extent_root = root->fs_info->extent_root;
	em_tree = &root->fs_info->mapping_tree.map_tree;

	/* step one, relocate all the extents inside this chunk */
	ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
	BUG_ON(ret);

	trans = btrfs_start_transaction(root, 1);
	BUG_ON(!trans);

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	spin_unlock(&em_tree->lock);

	BUG_ON(em->start > chunk_offset || em->start + em->len < chunk_offset);
	map = (struct map_lookup *)em->bdev;

	for (i = 0; i < map->num_stripes; i++) {
		ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
					    map->stripes[i].physical);
		BUG_ON(ret);
	}
	ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
			       chunk_offset);
	BUG_ON(ret);

	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
		BUG_ON(ret);
	}

	spin_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	kfree(map);
	em->bdev = NULL;

	/* once for the tree */
	free_extent_map(em);
	spin_unlock(&em_tree->lock);

	/* once for us */
	free_extent_map(em);

	btrfs_end_transaction(trans, root);
	return 0;
}
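
/*
 * so relocation is a three step dance: move the live extents out of the
 * chunk, delete the on-disk device extents and chunk items (plus the
 * superblock array copy for SYSTEM chunks), and drop the in-core mapping
 * so the old logical range can no longer be resolved.
 */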
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = device->dev_root;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_tree;
	u64 chunk_objectid;
	u64 chunk_offset;
	int ret;
	int slot;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 diff = device->total_bytes - new_size;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		ret = -ENOMEM;
		goto done;
	}

	path->reada = 2;

	device->total_bytes = new_size;
	ret = btrfs_update_device(trans, device);
	if (ret) {
		btrfs_end_transaction(trans, root);
		goto done;
	}
	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy, old_total - diff);
	btrfs_end_transaction(trans, root);

	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto done;

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret < 0)
			goto done;
		if (ret) {
			ret = 0;
			goto done;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid)
			goto done;

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		if (key.offset + length <= new_size)
			goto done;

		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(root, path);

		ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
					   chunk_offset);
		if (ret)
			goto done;
	}

done:
	btrfs_free_path(path);
	return ret;
}
int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_key *key,
			   struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 array_size;
	u8 *ptr;

	array_size = btrfs_super_sys_array_size(super_copy);
	if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
		return -EFBIG;

	ptr = super_copy->sys_chunk_array + array_size;
	btrfs_cpu_key_to_disk(&disk_key, key);
	memcpy(ptr, &disk_key, sizeof(disk_key));
	ptr += sizeof(disk_key);
	memcpy(ptr, chunk, item_size);
	item_size += sizeof(disk_key);
	btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
	return 0;
}
static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}
static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
			       int sub_stripes)
{
	if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
		return calc_size;
	else if (type & BTRFS_BLOCK_GROUP_RAID10)
		return calc_size * (num_stripes / sub_stripes);
	else
		return calc_size * num_stripes;
}
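
/*
 * e.g. with calc_size = 1GB: RAID1 and DUP expose 1GB of logical space
 * regardless of mirror count, RAID10 with num_stripes = 4 and
 * sub_stripes = 2 exposes 2GB, and plain striping over 4 devices exposes
 * the full 4GB.
 */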
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
		      struct btrfs_root *extent_root, u64 *start,
		      u64 *num_bytes, u64 type)
{
	u64 dev_offset;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
	struct btrfs_path *path;
	struct btrfs_stripe *stripes;
	struct btrfs_device *device = NULL;
	struct btrfs_chunk *chunk;
	struct list_head private_devs;
	struct list_head *dev_list;
	struct list_head *cur;
	struct extent_map_tree *em_tree;
	struct map_lookup *map;
	struct extent_map *em;
	int min_stripe_size = 1 * 1024 * 1024;
	u64 physical;
	u64 calc_size = 1024 * 1024 * 1024;
	u64 max_chunk_size = calc_size;
	u64 min_free;
	u64 avail;
	u64 max_avail = 0;
	u64 percent_max;
	int num_stripes = 1;
	int min_stripes = 1;
	int sub_stripes = 0;
	int looped = 0;
	int ret;
	int index;
	int stripe_len = 64 * 1024;
	struct btrfs_key key;

	dev_list = &extent_root->fs_info->fs_devices->alloc_list;
	if (list_empty(dev_list))
		return -ENOSPC;

	if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_DUP)) {
		num_stripes = 2;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
		num_stripes = min_t(u64, 2,
				    btrfs_super_num_devices(&info->super_copy));
		if (num_stripes < 2)
			return -ENOSPC;
		min_stripes = 2;
	}
	if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
		num_stripes = btrfs_super_num_devices(&info->super_copy);
		if (num_stripes < 4)
			return -ENOSPC;
		num_stripes &= ~(u32)1;
		sub_stripes = 2;
		min_stripes = 4;
	}

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_chunk_size = 10 * calc_size;
		min_stripe_size = 64 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		max_chunk_size = 4 * calc_size;
		min_stripe_size = 32 * 1024 * 1024;
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		calc_size = 8 * 1024 * 1024;
		max_chunk_size = calc_size * 2;
		min_stripe_size = 1 * 1024 * 1024;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* we don't want a chunk larger than 10% of the FS */
	percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
	max_chunk_size = min(percent_max, max_chunk_size);

again:
	if (calc_size * num_stripes > max_chunk_size) {
		calc_size = max_chunk_size;
		do_div(calc_size, num_stripes);
		do_div(calc_size, stripe_len);
		calc_size *= stripe_len;
	}
	/* we don't want tiny stripes */
	calc_size = max_t(u64, min_stripe_size, calc_size);

	do_div(calc_size, stripe_len);
	calc_size *= stripe_len;

	INIT_LIST_HEAD(&private_devs);
	cur = dev_list->next;
	index = 0;

	if (type & BTRFS_BLOCK_GROUP_DUP)
		min_free = calc_size * 2;
	else
		min_free = calc_size;

	/* we add 1MB because we never use the first 1MB of the device */
	min_free += 1024 * 1024;

	/* build a private list of devices we will allocate from */
	while(index < num_stripes) {
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		avail = device->total_bytes - device->bytes_used;
		cur = cur->next;

		if (avail >= min_free) {
			u64 ignored_start = 0;
			ret = find_free_dev_extent(trans, device, path,
						   min_free,
						   &ignored_start);
			if (ret == 0) {
				list_move_tail(&device->dev_alloc_list,
					       &private_devs);
				index++;
				if (type & BTRFS_BLOCK_GROUP_DUP)
					index++;
			}
		} else if (avail > max_avail)
			max_avail = avail;
		if (cur == dev_list)
			break;
	}
	if (index < num_stripes) {
		list_splice(&private_devs, dev_list);
		if (index >= min_stripes) {
			num_stripes = index;
			if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
				num_stripes /= sub_stripes;
				num_stripes *= sub_stripes;
			}
			looped = 1;
			goto again;
		}
		if (!looped && max_avail > 0) {
			looped = 1;
			calc_size = max_avail;
			goto again;
		}
		btrfs_free_path(path);
		return -ENOSPC;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
			      &key.offset);
	if (ret) {
		btrfs_free_path(path);
		return ret;
	}

	chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
	if (!chunk) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		kfree(chunk);
		btrfs_free_path(path);
		return -ENOMEM;
	}
	btrfs_free_path(path);
	path = NULL;

	stripes = &chunk->stripe;
	*num_bytes = chunk_bytes_by_type(type, calc_size,
					 num_stripes, sub_stripes);

	index = 0;
	printk("new chunk type %Lu start %Lu size %Lu\n", type, key.offset, *num_bytes);
	while(index < num_stripes) {
		struct btrfs_stripe *stripe;
		BUG_ON(list_empty(&private_devs));
		cur = private_devs.next;
		device = list_entry(cur, struct btrfs_device, dev_alloc_list);

		/* loop over this device again if we're doing a dup group */
		if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
		    (index == num_stripes - 1))
			list_move_tail(&device->dev_alloc_list, dev_list);

		ret = btrfs_alloc_dev_extent(trans, device,
			     info->chunk_root->root_key.objectid,
			     BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
			     calc_size, &dev_offset);
		BUG_ON(ret);
		printk("alloc chunk start %Lu size %Lu from dev %Lu type %Lu\n", key.offset, calc_size, device->devid, type);
		device->bytes_used += calc_size;
		ret = btrfs_update_device(trans, device);
		BUG_ON(ret);

		map->stripes[index].dev = device;
		map->stripes[index].physical = dev_offset;
		stripe = stripes + index;
		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		physical = dev_offset;
		index++;
	}
	BUG_ON(!list_empty(&private_devs));

	/* key was set above */
	btrfs_set_stack_chunk_length(chunk, *num_bytes);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
	btrfs_set_stack_chunk_type(chunk, type);
	btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
	map->sector_size = extent_root->sectorsize;
	map->stripe_len = stripe_len;
	map->io_align = stripe_len;
	map->io_width = stripe_len;
	map->type = type;
	map->num_stripes = num_stripes;
	map->sub_stripes = sub_stripes;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
				btrfs_chunk_item_size(num_stripes));
	BUG_ON(ret);
	*start = key.offset;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->bdev = (struct block_device *)map;
	em->start = key.offset;
	em->len = *num_bytes;
	em->block_start = 0;

	if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_add_system_chunk(trans, chunk_root, &key,
				    chunk, btrfs_chunk_item_size(num_stripes));
		BUG_ON(ret);
	}
	kfree(chunk);

	em_tree = &extent_root->fs_info->mapping_tree.map_tree;
	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);
	BUG_ON(ret);
	free_extent_map(em);
	return ret;
}
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree, GFP_NOFS);
}
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while(1) {
		spin_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		spin_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		kfree(em->bdev);
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	int ret;

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, len);
	spin_unlock(&em_tree->lock);
	BUG_ON(!em);

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else
		ret = 1;
	free_extent_map(em);
	return ret;
}
static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
			     u64 logical, u64 *length,
			     struct btrfs_multi_bio **multi_ret,
			     int mirror_num, struct page *unplug_page)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct extent_map_tree *em_tree = &map_tree->map_tree;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	int stripes_allocated = 8;
	int stripes_required = 1;
	int stripe_index;
	int i;
	int num_stripes;
	struct btrfs_multi_bio *multi = NULL;

	if (multi_ret && !(rw & (1 << BIO_RW))) {
		stripes_allocated = 1;
	}
again:
	if (multi_ret) {
		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
				GFP_NOFS);
		if (!multi)
			return -ENOMEM;
	}

	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, *length);
	spin_unlock(&em_tree->lock);

	if (!em && unplug_page)
		return 0;

	if (!em) {
		printk("unable to find logical %Lu\n", logical);
		BUG();
	}

	BUG_ON(em->start > logical || em->start + em->len < logical);
	map = (struct map_lookup *)em->bdev;
	offset = logical - em->start;

	if (mirror_num > map->num_stripes)
		mirror_num = 0;

	/* if our multi bio struct is too small, back off and try again */
	if (rw & (1 << BIO_RW)) {
		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				 BTRFS_BLOCK_GROUP_DUP)) {
			stripes_required = map->num_stripes;
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripes_required = map->sub_stripes;
		}
	}
	if (multi_ret && rw == WRITE &&
	    stripes_allocated < stripes_required) {
		stripes_allocated = map->num_stripes;
		free_extent_map(em);
		kfree(multi);
		goto again;
	}
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	do_div(stripe_nr, map->stripe_len);

	stripe_offset = stripe_nr * map->stripe_len;
	BUG_ON(offset < stripe_offset);

	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;
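
	/*
	 * worked example with illustrative numbers: stripe_len = 64K and
	 * offset = 200K gives stripe_nr = 3 and stripe_offset = 8K, i.e.
	 * the block starts 8K into the fourth stripe of the chunk.
	 */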
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_DUP)) {
		/* we limit the length of each bio to what fits in a stripe */
		*length = min_t(u64, em->len - offset,
				map->stripe_len - stripe_offset);
	} else {
		*length = em->len - offset;
	}

	if (!multi_ret && !unplug_page)
		goto out;

	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->num_stripes;
		else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			u64 orig_stripe_nr = stripe_nr;
			stripe_index = do_div(orig_stripe_nr, num_stripes);
		}
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (rw & (1 << BIO_RW))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		int factor = map->num_stripes / map->sub_stripes;

		stripe_index = do_div(stripe_nr, factor);
		stripe_index *= map->sub_stripes;

		if (unplug_page || (rw & (1 << BIO_RW)))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			u64 orig_stripe_nr = stripe_nr;
			stripe_index += do_div(orig_stripe_nr,
					       map->sub_stripes);
		}
	} else {
		/*
		 * after this do_div call, stripe_nr is the number of stripes
		 * on this device we have to walk to find the data, and
		 * stripe_index is the number of our device in the stripe array
		 */
		stripe_index = do_div(stripe_nr, map->num_stripes);
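
		/*
		 * e.g. stripe_nr = 7 striped over 3 devices: do_div leaves
		 * stripe_nr = 2 and returns 1, so the data sits two full
		 * stripes deep on the second device.
		 */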
	}
	BUG_ON(stripe_index >= map->num_stripes);

	for (i = 0; i < num_stripes; i++) {
		if (unplug_page) {
			struct btrfs_device *device;
			struct backing_dev_info *bdi;

			device = map->stripes[stripe_index].dev;
			bdi = blk_get_backing_dev_info(device->bdev);
			if (bdi->unplug_io_fn) {
				bdi->unplug_io_fn(bdi, unplug_page);
			}
		} else {
			multi->stripes[i].physical =
				map->stripes[stripe_index].physical +
				stripe_offset + stripe_nr * map->stripe_len;
			multi->stripes[i].dev = map->stripes[stripe_index].dev;
		}
		stripe_index++;
	}
	if (multi_ret) {
		*multi_ret = multi;
		multi->num_stripes = num_stripes;
	}
out:
	free_extent_map(em);
	return 0;
}
int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
		    u64 logical, u64 *length,
		    struct btrfs_multi_bio **multi_ret, int mirror_num)
{
	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
				 mirror_num, NULL);
}
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
		      u64 logical, struct page *page)
{
	u64 length = PAGE_CACHE_SIZE;
	return __btrfs_map_block(map_tree, READ, logical, &length,
				 NULL, 0, page);
}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_multi_stripe(struct bio *bio, int err)
#else
static int end_bio_multi_stripe(struct bio *bio,
				unsigned int bytes_done, int err)
#endif
{
	struct btrfs_multi_bio *multi = bio->bi_private;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif
	if (err)
		multi->error = err;

	if (atomic_dec_and_test(&multi->stripes_pending)) {
		bio->bi_private = multi->private;
		bio->bi_end_io = multi->end_io;

		if (!err && multi->error)
			err = multi->error;
		kfree(multi);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
		bio_endio(bio, bio->bi_size, err);
#else
		bio_endio(bio, err);
#endif
	} else {
		bio_put(bio);
	}
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
		  int mirror_num)
{
	struct btrfs_mapping_tree *map_tree;
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = bio->bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	struct btrfs_multi_bio *multi = NULL;
	int ret;
	int dev_nr = 0;
	int total_devs = 1;

	length = bio->bi_size;
	map_tree = &root->fs_info->mapping_tree;
	map_length = length;

	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
			      mirror_num);
	BUG_ON(ret);

	total_devs = multi->num_stripes;
	if (map_length < length) {
		printk("mapping failed logical %Lu bio len %Lu "
		       "len %Lu\n", logical, length, map_length);
		BUG();
	}
	multi->end_io = first_bio->bi_end_io;
	multi->private = first_bio->bi_private;
	atomic_set(&multi->stripes_pending, multi->num_stripes);

	while(dev_nr < total_devs) {
		if (total_devs > 1) {
			if (dev_nr < total_devs - 1) {
				bio = bio_clone(first_bio, GFP_NOFS);
				BUG_ON(!bio);
			} else {
				bio = first_bio;
			}
			bio->bi_private = multi;
			bio->bi_end_io = end_bio_multi_stripe;
		}
		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
		dev = multi->stripes[dev_nr].dev;

		bio->bi_bdev = dev->bdev;
		spin_lock(&dev->io_lock);
		dev->total_ios++;
		spin_unlock(&dev->io_lock);
		submit_bio(rw, bio);
		dev_nr++;
	}
	if (total_devs == 1)
		kfree(multi);
	return 0;
}
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
				       u8 *uuid)
{
	struct list_head *head = &root->fs_info->fs_devices->devices;

	return __find_device(head, devid, uuid);
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;

	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	spin_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	spin_unlock(&map_tree->map_tree.lock);

	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}

	em->bdev = (struct block_device *)map;
	em->start = logical;
	em->len = length;
	em->block_start = 0;

	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
		if (!map->stripes[i].dev) {
			kfree(map);
			free_extent_map(em);
			return -EIO;
		}
	}

	spin_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em);
	spin_unlock(&map_tree->map_tree.lock);
	BUG_ON(ret);
	free_extent_map(em);

	return 0;
}
static int fill_device_from_item(struct extent_buffer *leaf,
				 struct btrfs_dev_item *dev_item,
				 struct btrfs_device *device)
{
	unsigned long ptr;

	device->devid = btrfs_device_id(leaf, dev_item);
	device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);

	ptr = (unsigned long)btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);

	return 0;
}
static int read_one_dev(struct btrfs_root *root,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid,
			   (unsigned long)btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	device = btrfs_find_device(root, devid, dev_uuid);
	if (!device) {
		printk("warning devid %Lu not found already\n", devid);
		device = kzalloc(sizeof(*device), GFP_NOFS);
		if (!device)
			return -ENOMEM;
		list_add(&device->dev_list,
			 &root->fs_info->fs_devices->devices);
		list_add(&device->dev_alloc_list,
			 &root->fs_info->fs_devices->alloc_list);
		device->barriers = 1;
		spin_lock_init(&device->io_lock);
	}

	fill_device_from_item(leaf, dev_item, device);
	device->dev_root = root->fs_info->dev_root;
	ret = 0;
#if 0
	ret = btrfs_open_device(device);
	if (ret) {
		kfree(device);
	}
#endif
	return ret;
}
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
{
	struct btrfs_dev_item *dev_item;

	dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
						     dev_item);
	return read_one_dev(root, buf, dev_item);
}
int btrfs_read_sys_array(struct btrfs_root *root)
{
	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
	struct extent_buffer *sb = root->fs_info->sb_buffer;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	unsigned long sb_ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur = 0;

	while (cur < array_size) {
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);
		ptr += len;
		sb_ptr += len;
		cur += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)sb_ptr;
			ret = read_one_chunk(root, &key, sb, chunk);
			BUG_ON(ret);
			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			len = btrfs_chunk_item_size(num_stripes);
		} else {
			BUG();
		}
		ptr += len;
		sb_ptr += len;
		cur += len;
	}
	return ret;
}
int btrfs_read_chunk_tree(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;

	root = root->fs_info->chunk_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* first we search for all of the device items, and then we
	 * read in all of the chunk items.  This way we can create chunk
	 * mappings that reference all of the devices that are found
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
again:
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	while(1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
			if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
				break;
			if (found_key.type == BTRFS_DEV_ITEM_KEY) {
				struct btrfs_dev_item *dev_item;
				dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
				ret = read_one_dev(root, leaf, dev_item);
				BUG_ON(ret);
			}
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(root, &found_key, leaf, chunk);
		}
		path->slots[0]++;
	}
	if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
		key.objectid = 0;
		btrfs_release_path(root, path);
		goto again;
	}

	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}